compat_ioctl: block: handle BLKREPORTZONE/BLKRESETZONE
block/blk-core.c
index 3ba4326a63b59632fad81e686bf8229eee07320b..f885b65324c22249f1b388c3ae6a1a2fd2b34bad 100644 (file)
@@ -143,6 +143,7 @@ static const struct {
        [BLK_STS_MEDIUM]        = { -ENODATA,   "critical medium" },
        [BLK_STS_PROTECTION]    = { -EILSEQ,    "protection" },
        [BLK_STS_RESOURCE]      = { -ENOMEM,    "kernel resource" },
+       [BLK_STS_DEV_RESOURCE]  = { -EBUSY,     "device resource" },
        [BLK_STS_AGAIN]         = { -EAGAIN,    "nonblocking retry" },
 
        /* device mapper special case, should not leak out: */
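The hunk above extends the blk_errors[] status table with BLK_STS_DEV_RESOURCE, mapped to -EBUSY and the name "device resource". A minimal userspace sketch of the same table-driven status-to-errno/name lookup (the STS_* constants and sts_to_errno() are hypothetical stand-ins, not the kernel's definitions):

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins for blk_status_t values. */
enum blk_sts { STS_OK, STS_RESOURCE, STS_DEV_RESOURCE, STS_AGAIN, STS_MAX };

static const struct {
	int		err;
	const char	*name;
} blk_errors[] = {
	[STS_OK]		= { 0,		"ok" },
	[STS_RESOURCE]		= { -ENOMEM,	"kernel resource" },
	[STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
	[STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },
};

/* Table lookup with a fallback for out-of-range status codes. */
static int sts_to_errno(enum blk_sts sts)
{
	if (sts >= STS_MAX)
		return -EIO;
	return blk_errors[sts].err;
}

int main(void)
{
	printf("%s -> %d\n", blk_errors[STS_DEV_RESOURCE].name,
	       sts_to_errno(STS_DEV_RESOURCE));	/* "device resource -> -16" */
	return 0;
}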
@@ -339,7 +340,6 @@ void blk_sync_queue(struct request_queue *q)
                struct blk_mq_hw_ctx *hctx;
                int i;
 
-               cancel_delayed_work_sync(&q->requeue_work);
                queue_for_each_hw_ctx(q, hctx, i)
                        cancel_delayed_work_sync(&hctx->run_work);
        } else {
@@ -699,6 +699,18 @@ void blk_cleanup_queue(struct request_queue *q)
        queue_flag_set(QUEUE_FLAG_DEAD, q);
        spin_unlock_irq(lock);
 
+       /*
+        * Make sure all in-progress dispatch is completed, because
+        * blk_freeze_queue() can only complete all requests, and
+        * dispatch may still be in progress since we dispatch requests
+        * from more than one context.
+        *
+        * We rely on the driver to deal with the race in case queue
+        * initialization isn't done.
+        */
+       if (q->mq_ops && blk_queue_init_done(q))
+               blk_mq_quiesce_queue(q);
+
        /* for synchronous bio-based driver finish in-flight integrity i/o */
        blk_flush_integrity();
 
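The new quiesce step exists because freezing only waits for outstanding requests, while a dispatch path may still be running in another context. A rough userspace analogy (not kernel code) of waiting for in-flight dispatchers to drain before teardown, using an rwlock where blk-mq uses RCU/SRCU grace periods; build with -lpthread:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t dispatch_lock = PTHREAD_RWLOCK_INITIALIZER;
static int queue_live = 1;

/* A dispatcher holds the read side while it is inside its dispatch path. */
static void *dispatcher(void *arg)
{
	(void)arg;
	pthread_rwlock_rdlock(&dispatch_lock);
	if (queue_live)
		puts("dispatching a request");
	pthread_rwlock_unlock(&dispatch_lock);
	return NULL;
}

/* "Quiesce": taking the write side waits until every dispatcher has left. */
static void quiesce_and_teardown(void)
{
	pthread_rwlock_wrlock(&dispatch_lock);
	queue_live = 0;			/* now safe to tear the queue down */
	pthread_rwlock_unlock(&dispatch_lock);
	puts("queue torn down");
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, dispatcher, NULL);
	quiesce_and_teardown();
	pthread_join(&t, NULL);
	return 0;
}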
@@ -707,7 +719,8 @@ void blk_cleanup_queue(struct request_queue *q)
        blk_sync_queue(q);
 
        if (q->mq_ops)
-               blk_mq_free_queue(q);
+               blk_mq_exit_queue(q);
+
        percpu_ref_exit(&q->q_usage_counter);
 
        spin_lock_irq(lock);
@@ -812,9 +825,8 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 
        while (true) {
                bool success = false;
-               int ret;
 
-               rcu_read_lock_sched();
+               rcu_read_lock();
                if (percpu_ref_tryget_live(&q->q_usage_counter)) {
                        /*
                         * The code that sets the PREEMPT_ONLY flag is
@@ -827,7 +839,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
                                percpu_ref_put(&q->q_usage_counter);
                        }
                }
-               rcu_read_unlock_sched();
+               rcu_read_unlock();
 
                if (success)
                        return 0;
@@ -844,14 +856,12 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
                 */
                smp_rmb();
 
-               ret = wait_event_interruptible(q->mq_freeze_wq,
-                               (atomic_read(&q->mq_freeze_depth) == 0 &&
-                                (preempt || !blk_queue_preempt_only(q))) ||
-                               blk_queue_dying(q));
+               wait_event(q->mq_freeze_wq,
+                          (atomic_read(&q->mq_freeze_depth) == 0 &&
+                           (preempt || !blk_queue_preempt_only(q))) ||
+                          blk_queue_dying(q));
                if (blk_queue_dying(q))
                        return -ENODEV;
-               if (ret)
-                       return ret;
        }
 }
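With wait_event_interruptible() replaced by wait_event(), blk_queue_enter() as shown here can only return 0 or -ENODEV; a caller blocked on a frozen queue is no longer woken early by a signal. A simplified userspace analogy of the tryget-or-wait loop, with a mutex and condition variable standing in for the percpu ref and mq_freeze_wq (not the kernel implementation):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t freeze_wq = PTHREAD_COND_INITIALIZER;
static int freeze_depth;	/* analogue of q->mq_freeze_depth */
static bool dying;		/* analogue of blk_queue_dying(q)  */
static int usage_counter;	/* analogue of q->q_usage_counter  */

/* Try to enter the queue; sleep (uninterruptibly) while it is frozen. */
static int queue_enter(void)
{
	pthread_mutex_lock(&lock);
	for (;;) {
		if (freeze_depth == 0) {	/* "tryget" succeeds */
			usage_counter++;
			pthread_mutex_unlock(&lock);
			return 0;
		}
		if (dying) {
			pthread_mutex_unlock(&lock);
			return -ENODEV;		/* the only other outcome */
		}
		pthread_cond_wait(&freeze_wq, &lock);	/* like wait_event() */
	}
}

int main(void)
{
	return queue_enter() ? 1 : 0;	/* not frozen, so enters at once */
}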
 
@@ -875,6 +885,10 @@ static void blk_rq_timed_out_timer(struct timer_list *t)
        kblockd_schedule_work(&q->timeout_work);
 }
 
+static void blk_timeout_work_dummy(struct work_struct *work)
+{
+}
+
 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
        struct request_queue *q;
@@ -909,7 +923,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
        timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
                    laptop_mode_timer_fn, 0);
        timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
-       INIT_WORK(&q->timeout_work, NULL);
+       INIT_WORK(&q->timeout_work, blk_timeout_work_dummy);
        INIT_LIST_HEAD(&q->queue_head);
        INIT_LIST_HEAD(&q->timeout_list);
        INIT_LIST_HEAD(&q->icq_list);
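Installing blk_timeout_work_dummy instead of NULL means that if the timeout work is ever scheduled before a real handler is set, the workqueue core calls a harmless no-op rather than jumping through a NULL pointer. A small userspace illustration of that design choice (not kernel code):

#include <stdio.h>

struct work { void (*func)(struct work *w); };

static void dummy_work(struct work *w) { (void)w; /* deliberately empty */ }
static void real_work(struct work *w)  { (void)w; puts("handling timeouts"); }

/* The runner calls the stored callback unconditionally, as the workqueue core does. */
static void run_work(struct work *w)
{
	w->func(w);	/* would be a NULL-pointer call if func were NULL */
}

int main(void)
{
	struct work timeout_work = { .func = dummy_work };

	run_work(&timeout_work);	/* harmless no-op during the init window */
	timeout_work.func = real_work;	/* driver later installs its handler   */
	run_work(&timeout_work);
	return 0;
}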
@@ -1040,7 +1054,7 @@ int blk_init_allocated_queue(struct request_queue *q)
 {
        WARN_ON_ONCE(q->mq_ops);
 
-       q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size);
+       q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size, GFP_KERNEL);
        if (!q->fq)
                return -ENOMEM;
 
@@ -1077,6 +1091,7 @@ out_exit_flush_rq:
                q->exit_rq_fn(q, q->fq->flush_rq);
 out_free_flush_queue:
        blk_free_flush_queue(q->fq);
+       q->fq = NULL;
        return -ENOMEM;
 }
 EXPORT_SYMBOL(blk_init_allocated_queue);
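Clearing q->fq on the error path keeps a later teardown path that also releases the flush queue from operating on an already-freed pointer. The same free-then-NULL pattern in a standalone userspace sketch (the struct and function names are made up for illustration):

#include <stdlib.h>

struct queue { void *fq; };

/* Fails after allocating fq, mirroring the out_free_flush_queue path. */
static int init_queue(struct queue *q)
{
	q->fq = malloc(64);
	if (!q->fq)
		return -1;
	/* ... a later initialization step fails ... */
	free(q->fq);
	q->fq = NULL;		/* the fix: do not leave a stale pointer behind */
	return -1;
}

/* A later cleanup path that also releases the flush queue. */
static void cleanup_queue(struct queue *q)
{
	free(q->fq);		/* free(NULL) is a no-op, so no double free */
	q->fq = NULL;
}

int main(void)
{
	struct queue q = { 0 };

	(void)init_queue(&q);
	cleanup_queue(&q);
	return 0;
}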
@@ -2392,7 +2407,7 @@ blk_qc_t submit_bio(struct bio *bio)
                unsigned int count;
 
                if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
-                       count = queue_logical_block_size(bio->bi_disk->queue);
+                       count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
                else
                        count = bio_sectors(bio);
 
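The WRITE_SAME accounting fix converts the logical block size from bytes to 512-byte sectors with >> 9, so the count matches the units bio_sectors() reports. Quick arithmetic check, assuming a hypothetical 4096-byte logical block size:

#include <stdio.h>

int main(void)
{
	unsigned int logical_block_size = 4096;	/* assumed example size, in bytes */

	/* 4096 bytes >> 9 == 8 sectors of 512 bytes each */
	printf("%u bytes = %u sectors\n",
	       logical_block_size, logical_block_size >> 9);
	return 0;
}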
@@ -2493,8 +2508,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
                 * bypass a potential scheduler on the bottom device for
                 * insert.
                 */
-               blk_mq_request_bypass_insert(rq, true);
-               return BLK_STS_OK;
+               return blk_mq_request_issue_directly(rq);
        }
 
        spin_lock_irqsave(q->queue_lock, flags);
@@ -3251,6 +3265,8 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 {
        if (bio_has_data(bio))
                rq->nr_phys_segments = bio_phys_segments(q, bio);
+       else if (bio_op(bio) == REQ_OP_DISCARD)
+               rq->nr_phys_segments = 1;
 
        rq->__data_len = bio->bi_iter.bi_size;
        rq->bio = rq->biotail = bio;
@@ -3334,6 +3350,10 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src)
        dst->cpu = src->cpu;
        dst->__sector = blk_rq_pos(src);
        dst->__data_len = blk_rq_bytes(src);
+       if (src->rq_flags & RQF_SPECIAL_PAYLOAD) {
+               dst->rq_flags |= RQF_SPECIAL_PAYLOAD;
+               dst->special_vec = src->special_vec;
+       }
        dst->nr_phys_segments = src->nr_phys_segments;
        dst->ioprio = src->ioprio;
        dst->extra_len = src->extra_len;
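The clone path now copies special_vec together with RQF_SPECIAL_PAYLOAD: a field that is only valid while a flag is set has to travel with that flag, otherwise the clone of e.g. a discard request loses its payload descriptor. A hypothetical standalone sketch of keeping flag and payload paired (constants and types are made up, not the kernel's):

#include <stdio.h>

#define SPECIAL_PAYLOAD 0x1	/* stand-in for RQF_SPECIAL_PAYLOAD */

struct req {
	unsigned int flags;
	struct { const void *ptr; unsigned int len; } special_vec;	/* valid only with the flag */
};

/* Copy the payload descriptor only together with the flag that makes it valid. */
static void prep_clone(struct req *dst, const struct req *src)
{
	if (src->flags & SPECIAL_PAYLOAD) {
		dst->flags |= SPECIAL_PAYLOAD;
		dst->special_vec = src->special_vec;
	}
}

int main(void)
{
	struct req src = { .flags = SPECIAL_PAYLOAD, .special_vec = { "payload", 7 } };
	struct req dst = { 0 };

	prep_clone(&dst, &src);
	printf("clone carries payload: %s\n",
	       (dst.flags & SPECIAL_PAYLOAD) ? "yes" : "no");
	return 0;
}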
@@ -3641,9 +3661,11 @@ EXPORT_SYMBOL(blk_finish_plug);
  */
 void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
 {
-       /* not support for RQF_PM and ->rpm_status in blk-mq yet */
-       if (q->mq_ops)
+       /* Don't enable runtime PM for blk-mq until it is ready */
+       if (q->mq_ops) {
+               pm_runtime_disable(dev);
                return;
+       }
 
        q->dev = dev;
        q->rpm_status = RPM_ACTIVE;