[BLK_STS_MEDIUM] = { -ENODATA, "critical medium" },
[BLK_STS_PROTECTION] = { -EILSEQ, "protection" },
[BLK_STS_RESOURCE] = { -ENOMEM, "kernel resource" },
+ [BLK_STS_DEV_RESOURCE] = { -EBUSY, "device resource" },
[BLK_STS_AGAIN] = { -EAGAIN, "nonblocking retry" },
/* device mapper special case, should not leak out: */
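As a point of reference, a minimal sketch of how a blk-mq driver's ->queue_rq() would use the new status; the demo_* names and the slot-counting scheme are hypothetical, not taken from this patch:

	#include <linux/atomic.h>
	#include <linux/blk-mq.h>

	struct demo_dev {
		atomic_t free_slots;		/* hypothetical device-private resource */
	};

	static blk_status_t demo_queue_rq(struct blk_mq_hw_ctx *hctx,
					  const struct blk_mq_queue_data *bd)
	{
		struct demo_dev *dev = hctx->queue->queuedata;

		/*
		 * A device-private resource ran out and the driver will rerun
		 * the queue itself once a slot is freed; BLK_STS_DEV_RESOURCE
		 * tells blk-mq not to schedule its own delayed rerun the way
		 * it does for BLK_STS_RESOURCE.
		 */
		if (atomic_dec_if_positive(&dev->free_slots) < 0)
			return BLK_STS_DEV_RESOURCE;

		blk_mq_start_request(bd->rq);
		/* ... hand bd->rq to the hardware here ... */
		return BLK_STS_OK;
	}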
struct blk_mq_hw_ctx *hctx;
int i;
- cancel_delayed_work_sync(&q->requeue_work);
queue_for_each_hw_ctx(q, hctx, i)
cancel_delayed_work_sync(&hctx->run_work);
} else {
queue_flag_set(QUEUE_FLAG_DEAD, q);
spin_unlock_irq(lock);
+ /*
+ * Make sure that all in-progress dispatches have completed:
+ * blk_freeze_queue() only waits for requests to complete, and a
+ * dispatch may still be in progress because we dispatch requests
+ * from more than one context.
+ *
+ * We rely on the driver to deal with the race in case queue
+ * initialization isn't done yet.
+ */
+ if (q->mq_ops && blk_queue_init_done(q))
+ blk_mq_quiesce_queue(q);
+
/* for synchronous bio-based driver finish in-flight integrity i/o */
blk_flush_integrity();
blk_sync_queue(q);
if (q->mq_ops)
- blk_mq_free_queue(q);
+ blk_mq_exit_queue(q);
+
percpu_ref_exit(&q->q_usage_counter);
spin_lock_irq(lock);
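Taken together, the hunks above aim at a specific teardown ordering. A condensed, non-verbatim sketch of that ordering for a blk-mq queue (blk_cleanup_queue() itself has additional locking and legacy-path work around these calls):

	static void sketch_cleanup_ordering(struct request_queue *q)
	{
		blk_set_queue_dying(q);		/* fail new blk_queue_enter() callers */
		blk_freeze_queue(q);		/* wait for queued requests to complete */

		/*
		 * Freezing only waits for requests; a dispatch may still be
		 * running in another context, so quiesce before tearing down
		 * the hardware contexts below.
		 */
		if (q->mq_ops && blk_queue_init_done(q))
			blk_mq_quiesce_queue(q);

		blk_sync_queue(q);		/* cancel timeout and hctx run work */
		if (q->mq_ops)
			blk_mq_exit_queue(q);	/* was blk_mq_free_queue() before this change */
		percpu_ref_exit(&q->q_usage_counter);
	}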
while (true) {
bool success = false;
- int ret;
- rcu_read_lock_sched();
+ rcu_read_lock();
if (percpu_ref_tryget_live(&q->q_usage_counter)) {
/*
* The code that sets the PREEMPT_ONLY flag is
percpu_ref_put(&q->q_usage_counter);
}
}
- rcu_read_unlock_sched();
+ rcu_read_unlock();
if (success)
return 0;
*/
smp_rmb();
- ret = wait_event_interruptible(q->mq_freeze_wq,
- (atomic_read(&q->mq_freeze_depth) == 0 &&
- (preempt || !blk_queue_preempt_only(q))) ||
- blk_queue_dying(q));
+ wait_event(q->mq_freeze_wq,
+ (atomic_read(&q->mq_freeze_depth) == 0 &&
+ (preempt || !blk_queue_preempt_only(q))) ||
+ blk_queue_dying(q));
if (blk_queue_dying(q))
return -ENODEV;
- if (ret)
- return ret;
}
}
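The visible effect of dropping wait_event_interruptible() is on callers: blk_queue_enter() now fails only with -ENODEV once the queue is dying, so callers no longer have to handle -ERESTARTSYS. A hedged caller-side sketch (demo_enter_and_submit() is hypothetical):

	static int demo_enter_and_submit(struct request_queue *q)
	{
		int ret = blk_queue_enter(q, 0);	/* no BLK_MQ_REQ_PREEMPT etc. */

		if (ret)
			return ret;			/* only -ENODEV: queue is dying */

		/* ... safe to issue I/O while holding the q_usage_counter ref ... */

		blk_queue_exit(q);
		return 0;
	}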
kblockd_schedule_work(&q->timeout_work);
}
+static void blk_timeout_work_dummy(struct work_struct *work)
+{
+}
+
struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
struct request_queue *q;
timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
laptop_mode_timer_fn, 0);
timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
- INIT_WORK(&q->timeout_work, NULL);
+ INIT_WORK(&q->timeout_work, blk_timeout_work_dummy);
INIT_LIST_HEAD(&q->queue_head);
INIT_LIST_HEAD(&q->timeout_list);
INIT_LIST_HEAD(&q->icq_list);
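The dummy appears to guard the window between queue allocation and the point where the real timeout handler is installed during queue initialization (blk_timeout_work on the legacy path, blk_mq_timeout_work for blk-mq, going by mainline). From memory of mainline blk-core.c (may differ slightly by release), the timer handler that would queue the work in that window is just:

	static void blk_rq_timed_out_timer(struct timer_list *t)
	{
		struct request_queue *q = from_timer(q, t, timeout);

		/*
		 * With INIT_WORK(&q->timeout_work, NULL), an early expiry would
		 * have kblockd invoke a NULL work function; the dummy handler
		 * turns that window into a harmless no-op instead.
		 */
		kblockd_schedule_work(&q->timeout_work);
	}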
{
WARN_ON_ONCE(q->mq_ops);
- q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size);
+ q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size, GFP_KERNEL);
if (!q->fq)
return -ENOMEM;
q->exit_rq_fn(q, q->fq->flush_rq);
out_free_flush_queue:
blk_free_flush_queue(q->fq);
+ q->fq = NULL;
return -ENOMEM;
}
EXPORT_SYMBOL(blk_init_allocated_queue);
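Clearing q->fq in the failure path matters because the caller still tears the queue down with blk_cleanup_queue(), and later teardown paths look at q->fq again; a stale pointer to freed memory becomes a use-after-free, while NULL is handled. For reference, the free helper (hedged, from memory of mainline blk-flush.c) already treats NULL as "no flush queue":

	void blk_free_flush_queue(struct blk_flush_queue *fq)
	{
		/* bio-based request queues have no flush queue */
		if (!fq)
			return;

		kfree(fq->flush_rq);
		kfree(fq);
	}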
unsigned int count;
if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
- count = queue_logical_block_size(bio->bi_disk->queue);
+ count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
else
count = bio_sectors(bio);
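The count feeding this accounting is otherwise taken from bio_sectors(), i.e. 512-byte sectors, while queue_logical_block_size() returns bytes, hence the new shift. A trivial sketch of the conversion (demo helper, not from the patch):

	/* e.g. a 4096-byte logical block accounts for 4096 >> 9 == 8 sectors */
	static inline unsigned int demo_bytes_to_sectors(unsigned int bytes)
	{
		return bytes >> 9;	/* 512-byte sectors */
	}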
* bypass a potential scheduler on the bottom device for
* insert.
*/
- blk_mq_request_bypass_insert(rq, true);
- return BLK_STS_OK;
+ return blk_mq_request_issue_directly(rq);
}
spin_lock_irqsave(q->queue_lock, flags);
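Issuing the clone directly, instead of bypass-inserting it and unconditionally returning BLK_STS_OK, lets a resource failure on the lower queue propagate back to the stacking driver, which can then delay and retry on its own path. A hedged sketch of the caller-side handling this enables (dm-rq style; the switch arms are illustrative, not copied from device-mapper):

	static void demo_dispatch_clone(struct request *clone)
	{
		blk_status_t ret = blk_insert_cloned_request(clone->q, clone);

		switch (ret) {
		case BLK_STS_OK:
			break;
		case BLK_STS_RESOURCE:
		case BLK_STS_DEV_RESOURCE:
			/*
			 * The lower queue is busy: unprep the clone and requeue
			 * the original request after a short delay.
			 */
			break;
		default:
			/* hard error: complete the original request with ret */
			break;
		}
	}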
{
if (bio_has_data(bio))
rq->nr_phys_segments = bio_phys_segments(q, bio);
+ else if (bio_op(bio) == REQ_OP_DISCARD)
+ rq->nr_phys_segments = 1;
rq->__data_len = bio->bi_iter.bi_size;
rq->bio = rq->biotail = bio;
dst->cpu = src->cpu;
dst->__sector = blk_rq_pos(src);
dst->__data_len = blk_rq_bytes(src);
+ if (src->rq_flags & RQF_SPECIAL_PAYLOAD) {
+ dst->rq_flags |= RQF_SPECIAL_PAYLOAD;
+ dst->special_vec = src->special_vec;
+ }
dst->nr_phys_segments = src->nr_phys_segments;
dst->ioprio = src->ioprio;
dst->extra_len = src->extra_len;
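This hunk and the discard hunk just above it both deal with requests whose data is carried in rq->special_vec rather than in attached bios: a discard still occupies one physical segment for mapping purposes, and a clone that loses RQF_SPECIAL_PAYLOAD and special_vec would map no payload at all. For reference, the accessor that consumers rely on (hedged, from memory of include/linux/blkdev.h):

	static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
	{
		/*
		 * Data-less commands such as discards may carry a driver-built
		 * payload in rq->special_vec; report one segment so it gets mapped.
		 */
		if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
			return 1;
		return rq->nr_phys_segments;
	}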
*/
void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
{
- /* not support for RQF_PM and ->rpm_status in blk-mq yet */
- if (q->mq_ops)
+ /* Don't enable runtime PM for blk-mq until it is ready */
+ if (q->mq_ops) {
+ pm_runtime_disable(dev);
return;
+ }
q->dev = dev;
q->rpm_status = RPM_ACTIVE;
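Hedged reading of this last change: the legacy path keeps setting up q->dev and q->rpm_status so block-layer runtime PM works as before, while a blk-mq queue now actively disables runtime PM on the device rather than returning silently, since nothing on the blk-mq side would resume a suspended device for I/O yet. A minimal caller-side sketch (the demo_* wrapper is hypothetical):

	#include <linux/blkdev.h>
	#include <linux/device.h>

	static void demo_attach_runtime_pm(struct request_queue *q, struct device *dev)
	{
		/*
		 * Legacy queues: runtime PM is driven by the block layer via
		 * q->rpm_status and RQF_PM requests.
		 * blk-mq queues: runtime PM is disabled outright so the device
		 * cannot be suspended underneath a queue that never resumes it.
		 */
		blk_pm_runtime_init(q, dev);
	}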