When taskq_dispatch() calls taskq_thread_spawn() to create a new thread
for a taskq, Linux lockdep warns of possible recursive locking. This is
a false positive.
One such call chain is as follows, when a taskq needs more threads:
taskq_dispatch->taskq_thread_spawn->taskq_dispatch
The initial taskq_dispatch() holds tq_lock on the taskq that needed more
worker threads. The later call into taskq_dispatch() takes
dynamic_taskq->tq_lock. Without subclassing, lockdep believes these
could potentially be the same lock and complains. A similar case occurs
when taskq_dispatch() then calls task_alloc().
This patch uses spin_lock_irqsave_nested() when taking tq_lock, with one
of two new lock subclasses:
subclass              taskq
TQ_LOCK_DYNAMIC       dynamic_taskq
TQ_LOCK_GENERAL       any other
Signed-off-by: Olaf Faaland <faaland1@llnl.gov>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Issue #480
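
For illustration, a minimal sketch of how the subclass annotation
resolves the false positive (not part of this patch; the lock names
and helper function below are hypothetical):

    #include <linux/spinlock.h>

    /* Subclasses as introduced by this patch */
    typedef enum tq_lock_role {
            TQ_LOCK_GENERAL = 0,
            TQ_LOCK_DYNAMIC = 1,
    } tq_lock_role_t;

    /* Hypothetical stand-ins for the two tq_locks involved */
    static DEFINE_SPINLOCK(general_tq_lock);  /* taskq needing threads */
    static DEFINE_SPINLOCK(dynamic_tq_lock);  /* dynamic_taskq->tq_lock */

    static void nested_dispatch_sketch(void)
    {
            unsigned long flags_outer, flags_inner;

            /* Outer taskq_dispatch() holds the general taskq's lock */
            spin_lock_irqsave_nested(&general_tq_lock, flags_outer,
                TQ_LOCK_GENERAL);

            /*
             * taskq_thread_spawn() re-enters taskq_dispatch() on
             * dynamic_taskq. The distinct subclass tells lockdep this
             * is a different lock, not recursion on the first one.
             */
            spin_lock_irqsave_nested(&dynamic_tq_lock, flags_inner,
                TQ_LOCK_DYNAMIC);

            spin_unlock_irqrestore(&dynamic_tq_lock, flags_inner);
            spin_unlock_irqrestore(&general_tq_lock, flags_outer);
    }
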
#define TQ_NEW 0x04000000
#define TQ_FRONT 0x08000000
+/*
+ * spin_lock(lock) and spin_lock_nested(lock, 0) are equivalent,
+ * so TQ_LOCK_DYNAMIC must not evaluate to 0
+ */
+typedef enum tq_lock_role {
+ TQ_LOCK_GENERAL = 0,
+ TQ_LOCK_DYNAMIC = 1,
+} tq_lock_role_t;
+
typedef unsigned long taskqid_t;
typedef void (task_func_t)(void *);
struct list_head tq_delay_list; /* delayed task_t's */
wait_queue_head_t tq_work_waitq; /* new work waitq */
wait_queue_head_t tq_wait_waitq; /* wait waitq */
+ tq_lock_role_t tq_lock_class; /* class used when taking tq_lock */
} taskq_t;
typedef struct taskq_ent {
*/
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
schedule_timeout(HZ / 100);
- spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+ spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags,
+ tq->tq_lock_class);
if (count < 100) {
count++;
goto retry;
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
t = kmem_alloc(sizeof(taskq_ent_t), task_km_flags(flags));
- spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+ spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags,
+ tq->tq_lock_class);
if (t) {
taskq_init_ent(t);
taskq_t *tq = t->tqent_taskq;
struct list_head *l;
- spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+ spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags,
+ tq->tq_lock_class);
if (t->tqent_flags & TQENT_FLAG_CANCEL) {
ASSERT(list_empty(&t->tqent_list));
- spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+ spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags,
+ tq->tq_lock_class);
rc = (taskq_find(tq, id, &active) == NULL);
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
- spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+ spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags,
+ tq->tq_lock_class);
rc = (id < tq->tq_lowest_id);
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
- spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+ spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags,
+ tq->tq_lock_class);
rc = (tq->tq_lowest_id == tq->tq_next_id);
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
- spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+ spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags,
+ tq->tq_lock_class);
found = taskq_member_impl(tq, t);
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
- spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+ spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags,
+ tq->tq_lock_class);
t = taskq_find(tq, id, &active);
if (t && !active) {
list_del_init(&t->tqent_list);
if (timer_pending(&t->tqent_timer)) {
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
del_timer_sync(&t->tqent_timer);
- spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+ spin_lock_irqsave_nested(&tq->tq_lock,
+ tq->tq_lock_flags, tq->tq_lock_class);
}
if (!(t->tqent_flags & TQENT_FLAG_PREALLOC))
ASSERT(tq);
ASSERT(func);
- spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+ spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags,
+ tq->tq_lock_class);
/* Taskq being destroyed and all tasks drained */
if (!(tq->tq_flags & TASKQ_ACTIVE))
ASSERT(tq);
ASSERT(func);
- spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+ spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags,
+ tq->tq_lock_class);
/* Taskq being destroyed and all tasks drained */
if (!(tq->tq_flags & TASKQ_ACTIVE))
ASSERT(tq);
ASSERT(func);
- spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+ spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags,
+ tq->tq_lock_class);
/* Taskq being destroyed and all tasks drained */
if (!(tq->tq_flags & TASKQ_ACTIVE)) {
(void) taskq_thread_create(tq);
- spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+ spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags,
+ tq->tq_lock_class);
tq->tq_nspawn--;
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
}
/*
- * Spawn addition threads for dynamic taskqs (TASKQ_DYNMAIC) the current
+ * Spawn additional threads for dynamic taskqs (TASKQ_DYNAMIC) when the current
* number of threads is insufficient to handle the pending tasks. These
* new threads must be created by the dedicated dynamic_taskq to avoid
* deadlocks between thread creation and memory reclaim. The system_taskq
int seq_tasks = 0;
ASSERT(tqt);
tq = tqt->tqt_tq;
current->flags |= PF_NOFREEZE;
sigprocmask(SIG_BLOCK, &blocked, NULL);
flush_signals(current);
- spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+ spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags,
+ tq->tq_lock_class);
/* Immediately exit if more threads than allowed were created. */
if (tq->tq_nthreads >= tq->tq_maxthreads)
schedule();
seq_tasks = 0;
- spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+ spin_lock_irqsave_nested(&tq->tq_lock,
+ tq->tq_lock_flags, tq->tq_lock_class);
remove_wait_queue(&tq->tq_work_waitq, &wait);
} else {
__set_current_state(TASK_RUNNING);
/* Perform the requested task */
t->tqent_func(t->tqent_arg);
- spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+ spin_lock_irqsave_nested(&tq->tq_lock,
+ tq->tq_lock_flags, tq->tq_lock_class);
tq->tq_nactive--;
list_del_init(&tqt->tqt_active_list);
tqt->tqt_task = NULL;
INIT_LIST_HEAD(&tq->tq_delay_list);
init_waitqueue_head(&tq->tq_work_waitq);
init_waitqueue_head(&tq->tq_wait_waitq);
+ tq->tq_lock_class = TQ_LOCK_GENERAL;
if (flags & TASKQ_PREPOPULATE) {
- spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+ spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags,
+ tq->tq_lock_class);
for (i = 0; i < minalloc; i++)
task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW));
taskq_ent_t *t;
ASSERT(tq);
- spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+ spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags,
+ tq->tq_lock_class);
tq->tq_flags &= ~TASKQ_ACTIVE;
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
- spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+ spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags,
+ tq->tq_lock_class);
/*
* Signal each thread to exit and block until it does. Each thread
- spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+ spin_lock_irqsave_nested(&tq->tq_lock, tq->tq_lock_flags,
+ tq->tq_lock_class);
}
while (!list_empty(&tq->tq_free_list)) {
+ /*
+  * This is used to annotate tq_lock, so
+  *   taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch
+  * does not trigger a lockdep warning re: possible recursive locking
+  */
+ dynamic_taskq->tq_lock_class = TQ_LOCK_DYNAMIC;
+
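
As a footnote to the header comment above: spin_lock() implicitly uses
lockdep subclass 0, which is why TQ_LOCK_DYNAMIC must not evaluate to 0.
A minimal, hypothetical sketch (not from this patch) of that equivalence:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);

    static void subclass_zero_demo(void)
    {
            /*
             * lockdep annotates these two acquisitions identically:
             * plain spin_lock() uses subclass 0. Annotating
             * dynamic_taskq->tq_lock with subclass 0 would therefore
             * add no disambiguation and the warning would remain.
             */
            spin_lock_nested(&example_lock, 0);
            spin_unlock(&example_lock);

            spin_lock(&example_lock);
            spin_unlock(&example_lock);
    }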