block: deal with stale req count of plug list
diff --git a/block/blk-mq.c b/block/blk-mq.c
index d180c989a0e52238ab140ffdb0648801a662b30d..f39e69c732cc628c7fa54802160a2c495b28e87d 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1291,11 +1291,11 @@ static struct request *blk_mq_map_request(struct request_queue *q,
        return rq;
 }
 
-static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
-                                     struct request *rq, blk_qc_t *cookie)
+static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
 {
        int ret;
        struct request_queue *q = rq->q;
+       struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
        struct blk_mq_queue_data bd = {
                .rq = rq,
                .list = NULL,
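The hunk above drops the hctx argument from blk_mq_try_issue_directly() and instead derives the hardware context inside the function, calling blk_mq_map_queue() on the submitting CPU recorded in rq->mq_ctx->cpu. Since the request already carries enough state to locate its hardware queue, callers no longer need to thread the pointer through; the caller-side payoff is visible in the next hunk. A minimal user-space sketch of the same refactor follows; dispatch_ctx, req, map_queue and issue are hypothetical stand-ins for the kernel types, not the real API.

#include <stdio.h>

struct dispatch_ctx { int queue_nr; };

static struct dispatch_ctx queues[4] = { { 0 }, { 1 }, { 2 }, { 3 } };

struct req { int cpu; };

/* Analogue of blk_mq_map_queue(): map a CPU to its hardware queue. */
static struct dispatch_ctx *map_queue(int cpu)
{
	return &queues[cpu % 4];
}

/*
 * Before the refactor this was issue(ctx, rq); deriving ctx from rq
 * shrinks every call site to a single argument.
 */
static void issue(struct req *rq)
{
	struct dispatch_ctx *ctx = map_queue(rq->cpu);

	printf("req from cpu %d -> queue %d\n", rq->cpu, ctx->queue_nr);
}

int main(void)
{
	struct req rq = { .cpu = 6 };

	issue(&rq);	/* prints: req from cpu 6 -> queue 2 */
	return 0;
}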
@@ -1414,11 +1414,11 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
                if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) {
                        rcu_read_lock();
-                       blk_mq_try_issue_directly(data.hctx, old_rq, &cookie);
+                       blk_mq_try_issue_directly(old_rq, &cookie);
                        rcu_read_unlock();
                } else {
                        srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
-                       blk_mq_try_issue_directly(data.hctx, old_rq, &cookie);
+                       blk_mq_try_issue_directly(old_rq, &cookie);
                        srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
                }
                goto done;
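Both call sites pick a read-side guard based on BLK_MQ_F_BLOCKING: plain rcu_read_lock() when the driver's ->queue_rq never sleeps, and srcu_read_lock() on the hctx's queue_rq_srcu when it may block, since classic RCU read-side critical sections must not sleep. The sketch below mirrors only that flag-selected control flow, with pthread rwlocks standing in for both primitives (user space has no SRCU, and rwlocks lack RCU's lockless read side); MAY_BLOCK and the guard names are hypothetical.

#include <pthread.h>
#include <stdio.h>

#define MAY_BLOCK 0x1	/* hypothetical, like BLK_MQ_F_BLOCKING */

static pthread_rwlock_t fast_guard = PTHREAD_RWLOCK_INITIALIZER;
static pthread_rwlock_t sleep_guard = PTHREAD_RWLOCK_INITIALIZER;

static void issue(int tag)
{
	printf("issued tag %d\n", tag);
}

static void try_issue(unsigned int flags, int tag)
{
	if (!(flags & MAY_BLOCK)) {
		/* analogue of the rcu_read_lock() branch */
		pthread_rwlock_rdlock(&fast_guard);
		issue(tag);
		pthread_rwlock_unlock(&fast_guard);
	} else {
		/* analogue of the srcu_read_lock() branch */
		pthread_rwlock_rdlock(&sleep_guard);
		issue(tag);
		pthread_rwlock_unlock(&sleep_guard);
	}
}

int main(void)
{
	try_issue(0, 1);
	try_issue(MAY_BLOCK, 2);
	return 0;
}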
@@ -1497,6 +1497,13 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
                struct request *last = NULL;
 
                blk_mq_bio_to_request(rq, bio);
+
+               /*
+                * @request_count may become stale because of schedule
+                * out, so check the list again.
+                */
+               if (list_empty(&plug->mq_list))
+                       request_count = 0;
                if (!request_count)
                        trace_block_plug(q);
                else
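This hunk is the fix named in the commit subject. request_count is sampled early in blk_sq_make_request(), before the request is allocated; if the task schedules out during allocation, the plug list can be flushed in the meantime, so the cached count no longer matches the list and the trace_block_plug()/last-request accounting below would misfire. The added check re-validates against the authoritative list and resets the count when the list turns out to be empty. A user-space sketch of the pattern, with hypothetical names throughout:

#include <stdbool.h>
#include <stdio.h>

struct plug {
	bool list_empty;	/* stands in for list_empty(&plug->mq_list) */
};

static void submit(struct plug *plug, int request_count)
{
	/* the cached sample may predate a flush of the list */
	if (plug->list_empty)
		request_count = 0;

	if (!request_count)
		printf("empty plug: start a new plug\n");
	else
		printf("append after %d plugged requests\n", request_count);
}

int main(void)
{
	struct plug plug = { .list_empty = false };
	int request_count = 3;		/* sampled while the list held 3 */

	/* the task slept; the scheduler flushed the plug list */
	plug.list_empty = true;

	submit(&plug, request_count);	/* takes the "empty plug" branch */
	return 0;
}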
@@ -2461,6 +2468,60 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
 }
 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
 
+static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
+{
+       struct request_queue *q = hctx->queue;
+       long state;
+
+       hctx->poll_considered++;
+
+       state = current->state;
+       while (!need_resched()) {
+               int ret;
+
+               hctx->poll_invoked++;
+
+               ret = q->mq_ops->poll(hctx, rq->tag);
+               if (ret > 0) {
+                       hctx->poll_success++;
+                       set_current_state(TASK_RUNNING);
+                       return true;
+               }
+
+               if (signal_pending_state(state, current))
+                       set_current_state(TASK_RUNNING);
+
+               if (current->state == TASK_RUNNING)
+                       return true;
+               if (ret < 0)
+                       break;
+               cpu_relax();
+       }
+
+       return false;
+}
+
+bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
+{
+       struct blk_mq_hw_ctx *hctx;
+       struct blk_plug *plug;
+       struct request *rq;
+
+       if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
+           !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
+               return false;
+
+       plug = current->plug;
+       if (plug)
+               blk_flush_plug_list(plug, false);
+
+       hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
+       rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
+
+       return __blk_mq_poll(hctx, rq);
+}
+EXPORT_SYMBOL_GPL(blk_mq_poll);
+
 void blk_mq_disable_hotplug(void)
 {
        mutex_lock(&all_q_mutex);
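blk_mq_poll() first flushes any current plug so the polled request is actually issued, then decodes the completion cookie into a hardware-queue number and tag to recover the request, and hands it to __blk_mq_poll(). That loop keeps calling the driver's ->poll op until it reports a completion (ret > 0), fails (ret < 0), the task needs to reschedule, or a pending signal sets the task runnable. The cookie itself is just a packed 32-bit value; the standalone program below mirrors the encode/decode helpers as defined in include/linux/blk_types.h of this kernel generation (tag in the low 16 bits, queue number above, all-ones meaning no cookie).

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t blk_qc_t;

#define BLK_QC_T_NONE	((blk_qc_t)-1)
#define BLK_QC_T_SHIFT	16

static bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}

/* submission side: pack queue number and tag into one word */
static blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num)
{
	return tag | (queue_num << BLK_QC_T_SHIFT);
}

/* poll side: recover both coordinates, as blk_mq_poll() does above */
static unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return cookie >> BLK_QC_T_SHIFT;
}

static unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}

int main(void)
{
	blk_qc_t cookie = blk_tag_to_qc_t(42, 3);

	assert(blk_qc_t_valid(cookie));
	printf("queue %u, tag %u\n",
	       blk_qc_t_to_queue_num(cookie),	/* 3 */
	       blk_qc_t_to_tag(cookie));	/* 42 */
	return 0;
}

Packing both coordinates into one opaque token lets submission hand upper layers a single value that the poll path can cheaply map back to a specific hctx and request, which is exactly what the q->queue_hw_ctx[] and blk_mq_tag_to_rq() lookups in blk_mq_poll() do.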