A preallocated taskq_ent_t's tqent_flags must be checked prior to
servicing the taskq_ent_t. Once a preallocated taskq entry is serviced,
the ownership of the entry is handed back to the caller of
taskq_dispatch, thus the entry's contents can potentially be mangled.
In particular, this is a problem in the case where a preallocated taskq
entry is serviced, and the caller clears its tqent_flags field. Thus,
when the function returns and task_done is called, it looks as though
the entry is **not** a preallocated task (when in fact it **is** a
preallocated task).
In this situation, task_done will place the preallocated taskq_ent_t
structure onto the taskq_t's free list. This is a **huge** mistake. If
the taskq_ent_t is then freed by the caller of taskq_dispatch, the
taskq_t's free list will hold a pointer to garbage data. Even worse, if
nothing has overwritten the freed memory before the pointer is
dereferenced, it may still look as though it points to a valid list_head
belonging to a taskq_ent_t structure.
Thus, the task entry's flags are now copied prior to servicing the task.
This copy is then checked to see if it is a preallocated task, in order
to determine whether the entry needs to be passed down to the task_done
function.
Signed-off-by: Prakash Surya <surya1@llnl.gov>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #71
struct task_struct *tqt_thread;
taskq_t *tqt_tq;
taskqid_t tqt_id;
struct task_struct *tqt_thread;
taskq_t *tqt_tq;
taskqid_t tqt_id;
} taskq_thread_t;
/* Global system-wide dynamic task queue available for all consumers */
} taskq_thread_t;
/* Global system-wide dynamic task queue available for all consumers */
ASSERT(t);
ASSERT(spin_is_locked(&tq->tq_lock));
ASSERT(t);
ASSERT(spin_is_locked(&tq->tq_lock));
- /* For prealloc'd tasks, we don't free anything. */
- if ((!(tq->tq_flags & TASKQ_DYNAMIC)) &&
- (t->tqent_flags & TQENT_FLAG_PREALLOC))
- return;
-
list_del_init(&t->tqent_list);
if (tq->tq_nalloc <= tq->tq_minalloc) {
list_del_init(&t->tqent_list);
if (tq->tq_nalloc <= tq->tq_minalloc) {
t->tqent_func = NULL;
t->tqent_arg = NULL;
t->tqent_flags = 0;
t->tqent_func = NULL;
t->tqent_arg = NULL;
t->tqent_flags = 0;
list_add_tail(&t->tqent_list, &tq->tq_free_list);
} else {
task_free(tq, t);
list_add_tail(&t->tqent_list, &tq->tq_free_list);
} else {
task_free(tq, t);
if (pend_list) {
t = list_entry(pend_list->next, taskq_ent_t, tqent_list);
list_del_init(&t->tqent_list);
if (pend_list) {
t = list_entry(pend_list->next, taskq_ent_t, tqent_list);
list_del_init(&t->tqent_list);
/* In order to support recursively dispatching a
* preallocated taskq_ent_t, tqent_id must be
* stored prior to executing tqent_func. */
tqt->tqt_id = t->tqent_id;
/* In order to support recursively dispatching a
* preallocated taskq_ent_t, tqent_id must be
* stored prior to executing tqent_func. */
tqt->tqt_id = t->tqent_id;
+
+ /* We must store a copy of the flags prior to
+ * servicing the task (servicing a prealloc'd task
+ * returns the ownership of the tqent back to
+ * the caller of taskq_dispatch). Thus,
+ * tqent_flags _may_ change within the call. */
+ tqt->tqt_flags = t->tqent_flags;
+
taskq_insert_in_order(tq, tqt);
tq->tq_nactive++;
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
taskq_insert_in_order(tq, tqt);
tq->tq_nactive++;
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
tq->tq_nactive--;
list_del_init(&tqt->tqt_active_list);
spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
tq->tq_nactive--;
list_del_init(&tqt->tqt_active_list);
+
+ /* For prealloc'd tasks, we don't free anything. */
+ if ((tq->tq_flags & TASKQ_DYNAMIC) ||
+ !(tqt->tqt_flags & TQENT_FLAG_PREALLOC))
+ task_done(tq, t);
/* When the current lowest outstanding taskqid is
* done calculate the new lowest outstanding id */
/* When the current lowest outstanding taskqid is
* done calculate the new lowest outstanding id */
wake_up_all(&tq->tq_wait_waitq);
}
wake_up_all(&tq->tq_wait_waitq);
}