@@ ... @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	 * task has exited, don't wait
 	 */
 	cic = cfqd->active_cic;
-	if (!cic || !cic->ioc->task)
+	if (!cic || !atomic_read(&cic->ioc->nr_tasks))
 		return;
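
This hunk is the heart of the change: once an io_context can be shared, it can outlive any single task, so the old "has the owning task exited?" test on the ->task pointer becomes "has the last task detached?", expressed as an atomically read task count. A minimal userspace sketch of that idea, not the kernel code; every name here is made up for illustration:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct io_context_sketch {
		atomic_int nr_tasks;	/* tasks currently attached */
	};

	/* a task attaches to (shares) the context */
	static void ioc_attach(struct io_context_sketch *ioc)
	{
		atomic_fetch_add(&ioc->nr_tasks, 1);
	}

	/* a task exits and detaches */
	static void ioc_detach(struct io_context_sketch *ioc)
	{
		atomic_fetch_sub(&ioc->nr_tasks, 1);
	}

	/* analogue of the patched check: idle only while somebody is attached */
	static bool worth_waiting(struct io_context_sketch *ioc)
	{
		return atomic_load(&ioc->nr_tasks) != 0;
	}

	int main(void)
	{
		static struct io_context_sketch ioc;	/* starts with zero tasks */

		ioc_attach(&ioc);
		ioc_attach(&ioc);	/* a second task shares the same context */
		ioc_detach(&ioc);
		printf("idle worth it? %s\n", worth_waiting(&ioc) ? "yes" : "no");
		ioc_detach(&ioc);
		printf("idle worth it? %s\n", worth_waiting(&ioc) ? "yes" : "no");
		return 0;
	}

With a single owner the two tests are equivalent; with sharing, only the count gives the right answer while any task is still attached.
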
@@ ... @@ static void cfq_free_io_context(struct io_context *ioc)
 	ioc->ioc_data = NULL;
 
+	spin_lock(&ioc->lock);
+
 	while ((n = rb_first(&ioc->cic_root)) != NULL) {
 		__cic = rb_entry(n, struct cfq_io_context, rb_node);
 		rb_erase(&__cic->rb_node, &ioc->cic_root);
@@ ... @@ static void cfq_free_io_context(struct io_context *ioc)
 	if (ioc_gone && !elv_ioc_count_read(ioc_count))
 		complete(ioc_gone);
+
+	spin_unlock(&ioc->lock);
 }
 
 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
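
This hunk and the cfq_exit_io_context and cfq_ioc_set_ioprio hunks below all apply the same pattern: every walk of ioc->cic_root is now bracketed by ioc->lock, since a shared io_context can be exited, freed, or reprioritized from more than one task at once. A userspace analogue of the teardown case, assuming the lock's only job here is to serialize walkers of the shared structure; all names are illustrative, and a plain list stands in for the rbtree:

	#include <pthread.h>
	#include <stdlib.h>

	struct entry {
		struct entry *next;
	};

	struct shared_ctx {
		pthread_spinlock_t lock;
		struct entry *entries;	/* protected by ->lock */
	};

	/* analogue of cfq_free_io_context(): erase and free every entry */
	static void ctx_free_entries(struct shared_ctx *ctx)
	{
		struct entry *e;

		pthread_spin_lock(&ctx->lock);

		while ((e = ctx->entries) != NULL) {	/* like rb_first() */
			ctx->entries = e->next;		/* like rb_erase() */
			free(e);
		}

		pthread_spin_unlock(&ctx->lock);
	}

	int main(void)
	{
		struct shared_ctx ctx = { .entries = NULL };

		pthread_spin_init(&ctx.lock, PTHREAD_PROCESS_PRIVATE);
		ctx.entries = calloc(1, sizeof(struct entry));	/* one entry to tear down */

		ctx_free_entries(&ctx);
		pthread_spin_destroy(&ctx.lock);
		return 0;
	}

Without the lock, two exiting tasks could both pull the same first node and double-free it.
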
@@ ... @@ static void cfq_exit_io_context(struct io_context *ioc)
 	ioc->ioc_data = NULL;
 
+	spin_lock(&ioc->lock);
 	/*
 	 * put the reference this task is holding to the various queues
 	 */
 	n = rb_first(&ioc->cic_root);
 	while (n != NULL) {
 		__cic = rb_entry(n, struct cfq_io_context, rb_node);
 
 		cfq_exit_single_io_context(__cic);
 		n = rb_next(n);
 	}
+
+	spin_unlock(&ioc->lock);
 }
 
 static struct cfq_io_context *
@@ ... @@ static void cfq_ioc_set_ioprio(struct io_context *ioc)
 	struct cfq_io_context *cic;
 	struct rb_node *n;
 
+	spin_lock(&ioc->lock);
+
 	ioc->ioprio_changed = 0;
 
 	n = rb_first(&ioc->cic_root);
 	while (n != NULL) {
 		cic = rb_entry(n, struct cfq_io_context, rb_node);
 
 		changed_ioprio(cic);
 		n = rb_next(n);
 	}
+
+	spin_unlock(&ioc->lock);
 }
 
 static struct cfq_queue *
@@ ... @@ cfq_cic_rb_lookup
 	if (cic && cic->key == cfqd)
 		return cic;
 
+	spin_lock(&ioc->lock);
 restart:
 	n = ioc->cic_root.rb_node;
 	while (n) {
 		cic = rb_entry(n, struct cfq_io_context, rb_node);
 		/* ->key must be copied to avoid race with cfq_exit_queue() */
 		k = cic->key;
 		if (unlikely(!k)) {
 			cfq_drop_dead_cic(ioc, cic);
 			goto restart;
 		}
 
 		if (key < k)
 			n = n->rb_left;
 		else if (key > k)
 			n = n->rb_right;
 		else {
 			ioc->ioc_data = cic;
+			spin_unlock(&ioc->lock);
 			return cic;
 		}
 	}
+	spin_unlock(&ioc->lock);
 	return NULL;
 }
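
The lookup above has two ways out, a hit and a miss, and the lock taken before the search must be dropped on both; hence the two +spin_unlock lines. A small userspace stand-in, not the kernel code, showing the unlock-on-every-path shape over a trivial table:

	#include <pthread.h>

	struct table {
		pthread_mutex_t lock;
		int keys[8];		/* protected by ->lock */
		int nr;
	};

	/* returns the index of key, or -1; never returns with ->lock held */
	static int table_lookup(struct table *t, int key)
	{
		int i;

		pthread_mutex_lock(&t->lock);

		for (i = 0; i < t->nr; i++) {
			if (t->keys[i] == key) {
				pthread_mutex_unlock(&t->lock);	/* hit path */
				return i;
			}
		}

		pthread_mutex_unlock(&t->lock);			/* miss path */
		return -1;
	}

	int main(void)
	{
		struct table t = { .lock = PTHREAD_MUTEX_INITIALIZER,
				   .keys = { 3, 7, 9 }, .nr = 3 };

		return table_lookup(&t, 7) == 1 ? 0 : 1;
	}

Note that the fast path that hits the ioc_data cache above still returns before the lock is taken, so it needs no unlock.
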
@@ ... @@ cfq_cic_link
 	unsigned long flags;
 	void *k;
 
+	spin_lock(&ioc->lock);
 	cic->ioc = ioc;
 	cic->key = cfqd;
@@ ... @@ cfq_cic_link
 	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
 	list_add(&cic->queue_list, &cfqd->cic_list);
 	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
+	spin_unlock(&ioc->lock);
 }
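
cfq_cic_link now holds two locks at once: ioc->lock on the outside and the queue lock (with interrupts disabled) on the inside. Presumably the point is a fixed nesting order, so no path can take the same pair the other way around and deadlock. A userspace sketch of that discipline, with hypothetical names:

	#include <pthread.h>

	static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;	/* outer */
	static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;	/* inner */

	static int ctx_links;	/* protected by ctx_lock */
	static int queue_len;	/* protected by queue_lock */

	/* analogue of cfq_cic_link(): update the context, then the queue list */
	static void link_ctx_to_queue(void)
	{
		pthread_mutex_lock(&ctx_lock);		/* like spin_lock(&ioc->lock) */
		ctx_links++;

		pthread_mutex_lock(&queue_lock);	/* like queue_lock, irqs off */
		queue_len++;
		pthread_mutex_unlock(&queue_lock);

		pthread_mutex_unlock(&ctx_lock);	/* like spin_unlock(&ioc->lock) */
	}

	int main(void)
	{
		link_ctx_to_queue();
		return (ctx_links == 1 && queue_len == 1) ? 0 : 1;
	}

As long as every two-lock path uses the same outer-then-inner order, two such paths cannot block each other in a cycle.
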
@@ ... @@ cfq_update_idle_window
 	/*
 	 * Disable idle window if the process thinks too long or seeks so much
 	 * that it doesn't matter
 	 */
 	enable_idle = cfq_cfqq_idle_window(cfqq);
 
-	if (!cic->ioc->task || !cfqd->cfq_slice_idle ||
+	if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
 	    (cfqd->hw_tag && CIC_SEEKY(cic)))
 		enable_idle = 0;
 	else if (sample_valid(cic->ttime_samples)) {
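
The last hunk applies the same nr_tasks substitution to the idle-window heuristic. Restated as a standalone predicate, a sketch rather than the kernel code, with illustrative names and types: idling is pointless when no task still holds the io context, when slice idling is configured off, or when the queue is seeky on tagged hardware.

	#include <stdatomic.h>
	#include <stdbool.h>

	struct ioc_sketch {
		atomic_int nr_tasks;
	};

	static bool keep_idle_window(struct ioc_sketch *ioc,
				     unsigned int slice_idle,	/* 0 = disabled */
				     bool hw_tag, bool seeky)
	{
		if (!atomic_load(&ioc->nr_tasks) || !slice_idle ||
		    (hw_tag && seeky))
			return false;	/* enable_idle = 0 in the hunk above */
		return true;
	}

	int main(void)
	{
		static struct ioc_sketch ioc;	/* nr_tasks == 0: all tasks gone */

		return keep_idle_window(&ioc, 8, false, false) ? 1 : 0;
	}
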