#include <sys/taskq.h>
#include <sys/kmem.h>
-#include <spl-debug.h>
-#ifdef SS_DEBUG_SUBSYS
-#undef SS_DEBUG_SUBSYS
-#endif
+int spl_taskq_thread_bind = 0;
+module_param(spl_taskq_thread_bind, int, 0644);
+MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default");
-#define SS_DEBUG_SUBSYS SS_TASKQ
+
+int spl_taskq_thread_dynamic = 1;
+module_param(spl_taskq_thread_dynamic, int, 0644);
+MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads");
+
+int spl_taskq_thread_priority = 1;
+module_param(spl_taskq_thread_priority, int, 0644);
+MODULE_PARM_DESC(spl_taskq_thread_priority,
+ "Allow non-default priority for taskq threads");
+
+int spl_taskq_thread_sequential = 4;
+module_param(spl_taskq_thread_sequential, int, 0644);
+MODULE_PARM_DESC(spl_taskq_thread_sequential,
+ "Create new taskq threads after N sequential tasks");
/* Global system-wide dynamic task queue available for all consumers */
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);
+/* Private dedicated taskq for creating new taskq threads on demand. */
+static taskq_t *dynamic_taskq;
+static taskq_thread_t *taskq_thread_create(taskq_t *);
+
static int
task_km_flags(uint_t flags)
{
{
taskq_ent_t *t;
int count = 0;
- SENTRY;
ASSERT(tq);
ASSERT(spin_is_locked(&tq->tq_lock));
ASSERT(!timer_pending(&t->tqent_timer));
list_del_init(&t->tqent_list);
- SRETURN(t);
+ return (t);
}
/* Free list is empty and memory allocations are prohibited */
if (flags & TQ_NOALLOC)
- SRETURN(NULL);
+ return (NULL);
/* Hit maximum taskq_ent_t pool size */
if (tq->tq_nalloc >= tq->tq_maxalloc) {
if (flags & TQ_NOSLEEP)
- SRETURN(NULL);
+ return (NULL);
/*
* Sleep periodically polling the free list for an available
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
schedule_timeout(HZ / 100);
spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
- if (count < 100)
- SGOTO(retry, count++);
+ if (count < 100) {
+ count++;
+ goto retry;
+ }
}
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
tq->tq_nalloc++;
}
- SRETURN(t);
+ return (t);
}
/*
static void
task_free(taskq_t *tq, taskq_ent_t *t)
{
- SENTRY;
-
ASSERT(tq);
ASSERT(t);
ASSERT(spin_is_locked(&tq->tq_lock));
kmem_free(t, sizeof(taskq_ent_t));
tq->tq_nalloc--;
-
- SEXIT;
}
/*
static void
task_done(taskq_t *tq, taskq_ent_t *t)
{
- SENTRY;
ASSERT(tq);
ASSERT(t);
ASSERT(spin_is_locked(&tq->tq_lock));
} else {
task_free(tq, t);
}
-
- SEXIT;
}
/*
taskqid_t lowest_id = tq->tq_next_id;
taskq_ent_t *t;
taskq_thread_t *tqt;
- SENTRY;
ASSERT(tq);
ASSERT(spin_is_locked(&tq->tq_lock));
lowest_id = MIN(lowest_id, tqt->tqt_id);
}
- SRETURN(lowest_id);
+ return (lowest_id);
}
/*
taskq_thread_t *w;
struct list_head *l;
- SENTRY;
ASSERT(tq);
ASSERT(tqt);
ASSERT(spin_is_locked(&tq->tq_lock));
}
if (l == &tq->tq_active_list)
list_add(&tqt->tqt_active_list, &tq->tq_active_list);
-
- SEXIT;
}
/*
{
struct list_head *l;
taskq_ent_t *t;
- SENTRY;
ASSERT(spin_is_locked(&tq->tq_lock));
t = list_entry(l, taskq_ent_t, tqent_list);
if (t->tqent_id == id)
- SRETURN(t);
+ return (t);
if (t->tqent_id > id)
break;
}
- SRETURN(NULL);
+ return (NULL);
}
/*
taskq_thread_t *tqt;
struct list_head *l;
taskq_ent_t *t;
- SENTRY;
ASSERT(spin_is_locked(&tq->tq_lock));
*active = 0;
t = taskq_find_list(tq, &tq->tq_delay_list, id);
if (t)
- SRETURN(t);
+ return (t);
t = taskq_find_list(tq, &tq->tq_prio_list, id);
if (t)
- SRETURN(t);
+ return (t);
t = taskq_find_list(tq, &tq->tq_pend_list, id);
if (t)
- SRETURN(t);
+ return (t);
list_for_each(l, &tq->tq_active_list) {
tqt = list_entry(l, taskq_thread_t, tqt_active_list);
if (tqt->tqt_id == id) {
t = tqt->tqt_task;
*active = 1;
- SRETURN(t);
+ return (t);
}
}
- SRETURN(NULL);
+ return (NULL);
}
/*
- * The taskq_wait_id() function blocks until the passed task id completes.
- * This does not guarantee that all lower task id's have completed.
+ * Theory for the taskq_wait_id(), taskq_wait_outstanding(), and
+ * taskq_wait() functions below.
+ *
+ * Taskq waiting is accomplished by tracking the lowest outstanding task
+ * id and the next available task id. As tasks are dispatched they are
+ * added to the tail of the pending, priority, or delay lists. As worker
+ * threads become available the tasks are removed from the heads of these
+ * lists and linked to the worker threads. This ensures the lists are
+ * kept sorted by lowest to highest task id.
+ *
+ * Therefore the lowest outstanding task id can be quickly determined by
+ * checking the head item from all of these lists. This value is stored
+ * with the taskq as the lowest id. It only needs to be recalculated when
+ * either the task with the current lowest id completes or is canceled.
+ *
+ * By blocking until the lowest task id exceeds the passed task id, the
+ * taskq_wait_outstanding() function can be easily implemented. Similarly,
+ * by blocking until the lowest task id matches the next task id, taskq_wait()
+ * can be implemented.
+ *
+ * Callers should be aware that when there are multiple worker threads it
+ * is possible for larger task ids to complete before smaller ones. Also,
+ * when the taskq contains delay tasks with small task ids, callers may
+ * block for a considerable length of time waiting for them to expire and
+ * execute.
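+ *
+ * For example, if tasks 7, 8, and 9 are outstanding and a worker thread
+ * is running task 7, the lowest id remains 7 until that task completes
+ * or is canceled; taskq_wait_outstanding(tq, 7) will therefore not
+ * return even if tasks 8 and 9 finish first.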
*/
-void
-taskq_wait_id(taskq_t *tq, taskqid_t id)
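+/*
+ * wait_event() condition for taskq_wait_id(): returns true once the
+ * given task id can no longer be found in the taskq.
+ */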
+static int
+taskq_wait_id_check(taskq_t *tq, taskqid_t id)
{
- DEFINE_WAIT(wait);
- taskq_ent_t *t;
int active = 0;
- SENTRY;
-
- ASSERT(tq);
- ASSERT(id > 0);
+ int rc;
spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
- t = taskq_find(tq, id, &active);
- if (t)
- prepare_to_wait(&t->tqent_waitq, &wait, TASK_UNINTERRUPTIBLE);
+ rc = (taskq_find(tq, id, &active) == NULL);
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
- /*
- * We rely on the kernels autoremove_wake_function() function to
- * remove us from the wait queue in the context of wake_up().
- * Once woken the taskq_ent_t pointer must never be accessed.
- */
- if (t) {
- t = NULL;
- schedule();
- __set_current_state(TASK_RUNNING);
- }
-
- SEXIT;
+ return (rc);
}
-EXPORT_SYMBOL(taskq_wait_id);
/*
- * The taskq_wait() function will block until all previously submitted
- * tasks have been completed. A previously submitted task is defined as
- * a task with a lower task id than the current task queue id. Note that
- * all task id's are assigned monotonically at dispatch time.
- *
- * Waiting for all previous tasks to complete is accomplished by tracking
- * the lowest outstanding task id. As tasks are dispatched they are added
- * added to the tail of the pending, priority, or delay lists. And as
- * worker threads become available the tasks are removed from the heads
- * of these lists and linked to the worker threads. This ensures the
- * lists are kept in lowest to highest task id order.
- *
- * Therefore the lowest outstanding task id can be quickly determined by
- * checking the head item from all of these lists. This value is stored
- * with the task queue as the lowest id. It only needs to be recalculated
- * when either the task with the current lowest id completes or is canceled.
- *
- * By blocking until the lowest task id exceeds the current task id when
- * the function was called we ensure all previous tasks have completed.
- *
- * NOTE: When there are multiple worked threads it is possible for larger
- * task ids to complete before smaller ones. Conversely when the task
- * queue contains delay tasks with small task ids, you may block for a
- * considerable length of time waiting for them to expire and execute.
+ * The taskq_wait_id() function blocks until the passed task id completes.
+ * This does not guarantee that all lower task ids have completed.
*/
+void
+taskq_wait_id(taskq_t *tq, taskqid_t id)
+{
+ wait_event(tq->tq_wait_waitq, taskq_wait_id_check(tq, id));
+}
+EXPORT_SYMBOL(taskq_wait_id);
+
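+/*
+ * wait_event() condition for taskq_wait_outstanding(): returns true once
+ * the lowest outstanding task id exceeds the passed 'id'.
+ */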
static int
-taskq_wait_check(taskq_t *tq, taskqid_t id)
+taskq_wait_outstanding_check(taskq_t *tq, taskqid_t id)
{
int rc;
rc = (id < tq->tq_lowest_id);
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
- SRETURN(rc);
+ return (rc);
}
+/*
+ * The taskq_wait_outstanding() function will block until all tasks with a
+ * lower taskqid than the passed 'id' have been completed. Note that all
+ * task ids are assigned monotonically at dispatch time. Zero may be
+ * passed for the id to indicate that all tasks dispatched up to this point,
+ * but not after, should be waited for.
+ */
void
-taskq_wait_all(taskq_t *tq, taskqid_t id)
+taskq_wait_outstanding(taskq_t *tq, taskqid_t id)
{
- wait_event(tq->tq_wait_waitq, taskq_wait_check(tq, id));
+ wait_event(tq->tq_wait_waitq,
+ taskq_wait_outstanding_check(tq, id ? id : tq->tq_next_id - 1));
}
-EXPORT_SYMBOL(taskq_wait_all);
+EXPORT_SYMBOL(taskq_wait_outstanding);
-void
-taskq_wait(taskq_t *tq)
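+/*
+ * wait_event() condition for taskq_wait(): returns true once the lowest
+ * outstanding task id has caught up with the next task id, i.e. the
+ * taskq is empty.
+ */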
+static int
+taskq_wait_check(taskq_t *tq)
{
- taskqid_t id;
- SENTRY;
- ASSERT(tq);
+ int rc;
- /* Wait for the largest outstanding taskqid */
spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
- id = tq->tq_next_id - 1;
+ rc = (tq->tq_lowest_id == tq->tq_next_id);
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
- taskq_wait_all(tq, id);
-
- SEXIT;
+ return (rc);
+}
+/*
+ * The taskq_wait() function will block until the taskq is empty.
+ * This means that if a taskq re-dispatches work to itself taskq_wait()
+ * callers will block indefinitely.
+ */
+void
+taskq_wait(taskq_t *tq)
+{
+ wait_event(tq->tq_wait_waitq, taskq_wait_check(tq));
}
EXPORT_SYMBOL(taskq_wait);
-int
-taskq_member(taskq_t *tq, void *t)
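+/*
+ * Lock-held portion of taskq_member(); the caller must hold tq->tq_lock.
+ */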
+static int
+taskq_member_impl(taskq_t *tq, void *t)
{
struct list_head *l;
taskq_thread_t *tqt;
- SENTRY;
+ int found = 0;
ASSERT(tq);
ASSERT(t);
+ ASSERT(spin_is_locked(&tq->tq_lock));
list_for_each(l, &tq->tq_thread_list) {
tqt = list_entry(l, taskq_thread_t, tqt_thread_list);
- if (tqt->tqt_thread == (struct task_struct *)t)
- SRETURN(1);
+ if (tqt->tqt_thread == (struct task_struct *)t) {
+ found = 1;
+ break;
+ }
}
+ return (found);
+}
+
+int
+taskq_member(taskq_t *tq, void *t)
+{
+ int found;
- SRETURN(0);
+ spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+ found = taskq_member_impl(tq, t);
+ spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+
+ return (found);
}
EXPORT_SYMBOL(taskq_member);
taskq_ent_t *t;
int active = 0;
int rc = ENOENT;
- SENTRY;
ASSERT(tq);
rc = EBUSY;
}
- SRETURN(rc);
+ return (rc);
}
EXPORT_SYMBOL(taskq_cancel_id);
+static int taskq_thread_spawn(taskq_t *tq);
+
taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
taskq_ent_t *t;
taskqid_t rc = 0;
- SENTRY;
ASSERT(tq);
ASSERT(func);
spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
/* Taskq being destroyed and all tasks drained */
- if (!(tq->tq_flags & TQ_ACTIVE))
- SGOTO(out, rc = 0);
+ if (!(tq->tq_flags & TASKQ_ACTIVE))
+ goto out;
/* Do not queue the task unless there is idle thread for it */
ASSERT(tq->tq_nactive <= tq->tq_nthreads);
if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads))
- SGOTO(out, rc = 0);
+ goto out;
if ((t = task_alloc(tq, flags)) == NULL)
- SGOTO(out, rc = 0);
+ goto out;
spin_lock(&t->tqent_lock);
wake_up(&tq->tq_work_waitq);
out:
+ /* Spawn additional taskq threads if required. */
+ if (tq->tq_nactive == tq->tq_nthreads)
+ (void) taskq_thread_spawn(tq);
+
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
- SRETURN(rc);
+ return (rc);
}
EXPORT_SYMBOL(taskq_dispatch);
taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
uint_t flags, clock_t expire_time)
{
- taskq_ent_t *t;
taskqid_t rc = 0;
- SENTRY;
+ taskq_ent_t *t;
ASSERT(tq);
ASSERT(func);
spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
/* Taskq being destroyed and all tasks drained */
- if (!(tq->tq_flags & TQ_ACTIVE))
- SGOTO(out, rc = 0);
+ if (!(tq->tq_flags & TASKQ_ACTIVE))
+ goto out;
if ((t = task_alloc(tq, flags)) == NULL)
- SGOTO(out, rc = 0);
+ goto out;
spin_lock(&t->tqent_lock);
spin_unlock(&t->tqent_lock);
out:
+ /* Spawn additional taskq threads if required. */
+ if (tq->tq_nactive == tq->tq_nthreads)
+ (void) taskq_thread_spawn(tq);
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
- SRETURN(rc);
+ return (rc);
}
EXPORT_SYMBOL(taskq_dispatch_delay);
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
taskq_ent_t *t)
{
- SENTRY;
-
ASSERT(tq);
ASSERT(func);
- ASSERT(!(tq->tq_flags & TASKQ_DYNAMIC));
spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
/* Taskq being destroyed and all tasks drained */
- if (!(tq->tq_flags & TQ_ACTIVE)) {
+ if (!(tq->tq_flags & TASKQ_ACTIVE)) {
t->tqent_id = 0;
goto out;
}
wake_up(&tq->tq_work_waitq);
out:
+ /* Spawn additional taskq threads if required. */
+ if (tq->tq_nactive == tq->tq_nthreads)
+ (void) taskq_thread_spawn(tq);
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
- SEXIT;
}
EXPORT_SYMBOL(taskq_dispatch_ent);
}
EXPORT_SYMBOL(taskq_init_ent);
+/*
+ * Return the next pending task; preference is given to tasks on the
+ * priority list which were dispatched with TQ_FRONT.
+ */
+static taskq_ent_t *
+taskq_next_ent(taskq_t *tq)
+{
+ struct list_head *list;
+
+ ASSERT(spin_is_locked(&tq->tq_lock));
+
+ if (!list_empty(&tq->tq_prio_list))
+ list = &tq->tq_prio_list;
+ else if (!list_empty(&tq->tq_pend_list))
+ list = &tq->tq_pend_list;
+ else
+ return (NULL);
+
+ return (list_entry(list->next, taskq_ent_t, tqent_list));
+}
+
+/*
+ * Spawns a new thread for the specified taskq.
+ */
+static void
+taskq_thread_spawn_task(void *arg)
+{
+ taskq_t *tq = (taskq_t *)arg;
+
+ (void) taskq_thread_create(tq);
+
+ spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+ tq->tq_nspawn--;
+ spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+}
+
+/*
+ * Spawn additional threads for dynamic taskqs (TASKQ_DYNAMIC) when the
+ * current number of threads is insufficient to handle the pending tasks.
+ * These new threads must be created by the dedicated dynamic_taskq to
+ * avoid deadlocks between thread creation and memory reclaim. The
+ * system_taskq, which is also a dynamic taskq, cannot be safely used
+ * for this.
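+ *
+ * Callers are expected to hold tq->tq_lock since tq_nspawn is updated
+ * here without taking it.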
+ */
+static int
+taskq_thread_spawn(taskq_t *tq)
+{
+ int spawning = 0;
+
+ if (!(tq->tq_flags & TASKQ_DYNAMIC))
+ return (0);
+
+ if ((tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
+ (tq->tq_flags & TASKQ_ACTIVE)) {
+ spawning = (++tq->tq_nspawn);
+ taskq_dispatch(dynamic_taskq, taskq_thread_spawn_task,
+ tq, TQ_NOSLEEP);
+ }
+
+ return (spawning);
+}
+
+/*
+ * Threads in a dynamic taskq should only exit once it has been completely
+ * drained and no other threads are actively servicing tasks. This prevents
+ * threads from being created and destroyed more than is required.
+ *
+ * The first thread in the thread list is treated as the primary thread.
+ * There is nothing special about the primary thread but in order to
+ * keep all the taskq pids from changing we opt to make it long running.
+ */
+static int
+taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt)
+{
+ ASSERT(spin_is_locked(&tq->tq_lock));
+
+ if (!(tq->tq_flags & TASKQ_DYNAMIC))
+ return (0);
+
+ if (list_first_entry(&(tq->tq_thread_list), taskq_thread_t,
+ tqt_thread_list) == tqt)
+ return (0);
+
+ return
+ ((tq->tq_nspawn == 0) && /* No threads are being spawned */
+ (tq->tq_nactive == 0) && /* No threads are handling tasks */
+ (tq->tq_nthreads > 1) && /* More than 1 thread is running */
+ (!taskq_next_ent(tq)) && /* There are no pending tasks */
+ (spl_taskq_thread_dynamic)); /* Dynamic taskqs are allowed */
+}
+
static int
taskq_thread(void *args)
{
taskq_thread_t *tqt = args;
taskq_t *tq;
taskq_ent_t *t;
- struct list_head *pend_list;
- SENTRY;
+ int seq_tasks = 0;
ASSERT(tqt);
tq = tqt->tqt_tq;
current->flags |= PF_NOFREEZE;
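+
+ /*
+ * Where supported, mark this thread so that all of its allocations are
+ * implicitly GFP_NOIO, presumably to keep reclaim initiated from a
+ * taskq thread from recursing back into the I/O path.
+ */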
+ #if defined(PF_MEMALLOC_NOIO)
+ (void) memalloc_noio_save();
+ #endif
+
sigfillset(&blocked);
sigprocmask(SIG_BLOCK, &blocked, NULL);
flush_signals(current);
spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+
+ /* Immediately exit if more threads than allowed were created. */
+ if (tq->tq_nthreads >= tq->tq_maxthreads)
+ goto error;
+
tq->tq_nthreads++;
+ list_add_tail(&tqt->tqt_thread_list, &tq->tq_thread_list);
wake_up(&tq->tq_wait_waitq);
set_current_state(TASK_INTERRUPTIBLE);
if (list_empty(&tq->tq_pend_list) &&
list_empty(&tq->tq_prio_list)) {
+
+ if (taskq_thread_should_stop(tq, tqt)) {
+ wake_up_all(&tq->tq_wait_waitq);
+ break;
+ }
+
add_wait_queue_exclusive(&tq->tq_work_waitq, &wait);
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+
schedule();
+ seq_tasks = 0;
+
spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
remove_wait_queue(&tq->tq_work_waitq, &wait);
} else {
__set_current_state(TASK_RUNNING);
}
-
- if (!list_empty(&tq->tq_prio_list))
- pend_list = &tq->tq_prio_list;
- else if (!list_empty(&tq->tq_pend_list))
- pend_list = &tq->tq_pend_list;
- else
- pend_list = NULL;
-
- if (pend_list) {
- t = list_entry(pend_list->next,taskq_ent_t,tqent_list);
+ if ((t = taskq_next_ent(tq)) != NULL) {
list_del_init(&t->tqent_list);
/* In order to support recursively dispatching a
tqt->tqt_task = NULL;
/* For prealloc'd tasks, we don't free anything. */
- if ((tq->tq_flags & TASKQ_DYNAMIC) ||
- !(tqt->tqt_flags & TQENT_FLAG_PREALLOC))
+ if (!(tqt->tqt_flags & TQENT_FLAG_PREALLOC))
task_done(tq, t);
/* When the current lowest outstanding taskqid is
ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);
}
+ /* Spawn additional taskq threads if required. */
+ if ((++seq_tasks) > spl_taskq_thread_sequential &&
+ taskq_thread_spawn(tq))
+ seq_tasks = 0;
+
tqt->tqt_id = 0;
tqt->tqt_flags = 0;
wake_up_all(&tq->tq_wait_waitq);
+ } else {
+ if (taskq_thread_should_stop(tq, tqt))
+ break;
}
set_current_state(TASK_INTERRUPTIBLE);
__set_current_state(TASK_RUNNING);
tq->tq_nthreads--;
list_del_init(&tqt->tqt_thread_list);
- kmem_free(tqt, sizeof(taskq_thread_t));
-
+error:
+ kmem_free(tqt, sizeof (taskq_thread_t));
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
- SRETURN(0);
+ return (0);
+}
+
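+/*
+ * Allocate a taskq_thread_t and start its worker thread. This may sleep,
+ * so it must not be called with tq->tq_lock held.
+ */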
+static taskq_thread_t *
+taskq_thread_create(taskq_t *tq)
+{
+ static int last_used_cpu = 0;
+ taskq_thread_t *tqt;
+
+ tqt = kmem_alloc(sizeof (*tqt), KM_PUSHPAGE);
+ INIT_LIST_HEAD(&tqt->tqt_thread_list);
+ INIT_LIST_HEAD(&tqt->tqt_active_list);
+ tqt->tqt_tq = tq;
+ tqt->tqt_id = 0;
+
+ tqt->tqt_thread = spl_kthread_create(taskq_thread, tqt,
+ "%s", tq->tq_name);
+ if (tqt->tqt_thread == NULL) {
+ kmem_free(tqt, sizeof (taskq_thread_t));
+ return (NULL);
+ }
+
+ if (spl_taskq_thread_bind) {
+ last_used_cpu = (last_used_cpu + 1) % num_online_cpus();
+ kthread_bind(tqt->tqt_thread, last_used_cpu);
+ }
+
+ if (spl_taskq_thread_priority)
+ set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(tq->tq_pri));
+
+ wake_up_process(tqt->tqt_thread);
+
+ return (tqt);
}
taskq_t *
{
taskq_t *tq;
taskq_thread_t *tqt;
- int rc = 0, i, j = 0;
- SENTRY;
+ int count = 0, rc = 0, i;
ASSERT(name != NULL);
- ASSERT(pri <= maxclsyspri);
ASSERT(minalloc >= 0);
ASSERT(maxalloc <= INT_MAX);
- ASSERT(!(flags & (TASKQ_CPR_SAFE | TASKQ_DYNAMIC))); /* Unsupported */
+ ASSERT(!(flags & (TASKQ_CPR_SAFE))); /* Unsupported */
/* Scale the number of threads using nthreads as a percentage */
if (flags & TASKQ_THREADS_CPU_PCT) {
nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
}
- tq = kmem_alloc(sizeof(*tq), KM_PUSHPAGE);
+ tq = kmem_alloc(sizeof (*tq), KM_PUSHPAGE);
if (tq == NULL)
- SRETURN(NULL);
+ return (NULL);
spin_lock_init(&tq->tq_lock);
- spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
INIT_LIST_HEAD(&tq->tq_thread_list);
INIT_LIST_HEAD(&tq->tq_active_list);
- tq->tq_name = name;
- tq->tq_nactive = 0;
- tq->tq_nthreads = 0;
- tq->tq_pri = pri;
- tq->tq_minalloc = minalloc;
- tq->tq_maxalloc = maxalloc;
- tq->tq_nalloc = 0;
- tq->tq_flags = (flags | TQ_ACTIVE);
- tq->tq_next_id = 1;
- tq->tq_lowest_id = 1;
+ tq->tq_name = strdup(name);
+ tq->tq_nactive = 0;
+ tq->tq_nthreads = 0;
+ tq->tq_nspawn = 0;
+ tq->tq_maxthreads = nthreads;
+ tq->tq_pri = pri;
+ tq->tq_minalloc = minalloc;
+ tq->tq_maxalloc = maxalloc;
+ tq->tq_nalloc = 0;
+ tq->tq_flags = (flags | TASKQ_ACTIVE);
+ tq->tq_next_id = 1;
+ tq->tq_lowest_id = 1;
INIT_LIST_HEAD(&tq->tq_free_list);
INIT_LIST_HEAD(&tq->tq_pend_list);
INIT_LIST_HEAD(&tq->tq_prio_list);
init_waitqueue_head(&tq->tq_work_waitq);
init_waitqueue_head(&tq->tq_wait_waitq);
- if (flags & TASKQ_PREPOPULATE)
+ if (flags & TASKQ_PREPOPULATE) {
+ spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+
for (i = 0; i < minalloc; i++)
task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW));
- spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+ spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+ }
+
+ if ((flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic)
+ nthreads = 1;
for (i = 0; i < nthreads; i++) {
- tqt = kmem_alloc(sizeof(*tqt), KM_PUSHPAGE);
- INIT_LIST_HEAD(&tqt->tqt_thread_list);
- INIT_LIST_HEAD(&tqt->tqt_active_list);
- tqt->tqt_tq = tq;
- tqt->tqt_id = 0;
-
- tqt->tqt_thread = kthread_create(taskq_thread, tqt,
- "%s/%d", name, i);
- if (tqt->tqt_thread) {
- list_add(&tqt->tqt_thread_list, &tq->tq_thread_list);
- kthread_bind(tqt->tqt_thread, i % num_online_cpus());
- set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(pri));
- wake_up_process(tqt->tqt_thread);
- j++;
- } else {
- kmem_free(tqt, sizeof(taskq_thread_t));
+ tqt = taskq_thread_create(tq);
+ if (tqt == NULL)
rc = 1;
- }
+ else
+ count++;
}
/* Wait for all threads to be started before potential destroy */
- wait_event(tq->tq_wait_waitq, tq->tq_nthreads == j);
+ wait_event(tq->tq_wait_waitq, tq->tq_nthreads == count);
if (rc) {
taskq_destroy(tq);
tq = NULL;
}
- SRETURN(tq);
+ return (tq);
}
EXPORT_SYMBOL(taskq_create);
struct task_struct *thread;
taskq_thread_t *tqt;
taskq_ent_t *t;
- SENTRY;
ASSERT(tq);
spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
- tq->tq_flags &= ~TQ_ACTIVE;
+ tq->tq_flags &= ~TASKQ_ACTIVE;
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
- /* TQ_ACTIVE cleared prevents new tasks being added to pending */
+ /*
+ * When TASKQ_ACTIVE is clear, new tasks may not be added and no new
+ * worker threads may be spawned for a dynamic taskq.
+ */
+ if (dynamic_taskq != NULL)
+ taskq_wait_outstanding(dynamic_taskq, 0);
+
taskq_wait(tq);
spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
*/
while (!list_empty(&tq->tq_thread_list)) {
tqt = list_entry(tq->tq_thread_list.next,
- taskq_thread_t, tqt_thread_list);
+ taskq_thread_t, tqt_thread_list);
thread = tqt->tqt_thread;
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
task_free(tq, t);
}
- ASSERT(tq->tq_nthreads == 0);
- ASSERT(tq->tq_nalloc == 0);
+ ASSERT0(tq->tq_nthreads);
+ ASSERT0(tq->tq_nalloc);
+ ASSERT0(tq->tq_nspawn);
ASSERT(list_empty(&tq->tq_thread_list));
ASSERT(list_empty(&tq->tq_active_list));
ASSERT(list_empty(&tq->tq_free_list));
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
- kmem_free(tq, sizeof(taskq_t));
-
- SEXIT;
+ strfree(tq->tq_name);
+ kmem_free(tq, sizeof (taskq_t));
}
EXPORT_SYMBOL(taskq_destroy);
int
spl_taskq_init(void)
{
- SENTRY;
-
- /* Solaris creates a dynamic taskq of up to 64 threads, however in
- * a Linux environment 1 thread per-core is usually about right */
- system_taskq = taskq_create("spl_system_taskq", num_online_cpus(),
- minclsyspri, 4, 512, TASKQ_PREPOPULATE);
+ system_taskq = taskq_create("spl_system_taskq", MAX(boot_ncpus, 64),
+ maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
if (system_taskq == NULL)
- SRETURN(1);
+ return (1);
- SRETURN(0);
+ dynamic_taskq = taskq_create("spl_dynamic_taskq", 1,
+ maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE);
+ if (dynamic_taskq == NULL) {
+ taskq_destroy(system_taskq);
+ return (1);
+ }
+
+ return (0);
}
void
spl_taskq_fini(void)
{
- SENTRY;
+ taskq_destroy(dynamic_taskq);
+ dynamic_taskq = NULL;
+
taskq_destroy(system_taskq);
- SEXIT;
+ system_taskq = NULL;
}