diff --git a/block/blk-mq.c b/block/blk-mq.c
index 041f7b7fa0d6def444e9349b6cf748afc8e89b2d..e0523eb8eee1afe93722054bcb8dc8a5af9c1bf9 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -301,11 +301,12 @@ static struct request *blk_mq_get_request(struct request_queue *q,
        struct elevator_queue *e = q->elevator;
        struct request *rq;
        unsigned int tag;
+       struct blk_mq_ctx *local_ctx = NULL;
 
        blk_queue_enter_live(q);
        data->q = q;
        if (likely(!data->ctx))
-               data->ctx = blk_mq_get_ctx(q);
+               data->ctx = local_ctx = blk_mq_get_ctx(q);
        if (likely(!data->hctx))
                data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
        if (op & REQ_NOWAIT)
@@ -324,6 +325,10 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 
        tag = blk_mq_get_tag(data);
        if (tag == BLK_MQ_TAG_FAIL) {
+               if (local_ctx) {
+                       blk_mq_put_ctx(local_ctx);
+                       data->ctx = NULL;
+               }
                blk_queue_exit(q);
                return NULL;
        }
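For context, blk_mq_get_ctx() in this kernel pins the caller to its CPU (it wraps get_cpu(), which bumps the preempt count) and blk_queue_enter_live() takes a q_usage_counter reference. Before this fix, the BLK_MQ_TAG_FAIL path returned without releasing a ctx that blk_mq_get_request() had acquired itself, leaking the preempt count; tracking it in local_ctx lets the failure path undo exactly what this function took, and clearing data->ctx tells callers there is nothing left to put. The helpers pair roughly like this (a sketch close to the block/blk-mq.h definitions of this era, not copied verbatim):

	static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
	{
		/* get_cpu() disables preemption until the matching put */
		return __blk_mq_get_ctx(q, get_cpu());
	}

	static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
	{
		put_cpu();	/* ctx itself needs no teardown */
	}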
@@ -355,13 +360,13 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
                return ERR_PTR(ret);
 
        rq = blk_mq_get_request(q, NULL, op, &alloc_data);
-
-       blk_mq_put_ctx(alloc_data.ctx);
        blk_queue_exit(q);
 
        if (!rq)
                return ERR_PTR(-EWOULDBLOCK);
 
+       blk_mq_put_ctx(alloc_data.ctx);
+
        rq->__data_len = 0;
        rq->__sector = (sector_t) -1;
        rq->bio = rq->biotail = NULL;
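The reordering above is the caller-side half of the same fix: because blk_mq_get_request() now drops the ctx itself when tag allocation fails (and NULLs alloc_data.ctx), an unconditional blk_mq_put_ctx() here would call put_cpu() a second time and unbalance the preempt count. The resulting flow, abridged with the invariant spelled out in comments:

	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
	blk_queue_exit(q);

	if (!rq)
		/* failure path: blk_mq_get_request() already put the ctx */
		return ERR_PTR(-EWOULDBLOCK);

	/* success path: the ctx reference is still ours to release */
	blk_mq_put_ctx(alloc_data.ctx);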
@@ -406,7 +411,6 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
        alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
 
        rq = blk_mq_get_request(q, NULL, op, &alloc_data);
-
        blk_queue_exit(q);
 
        if (!rq)
@@ -679,8 +683,8 @@ EXPORT_SYMBOL(blk_mq_kick_requeue_list);
 void blk_mq_delay_kick_requeue_list(struct request_queue *q,
                                    unsigned long msecs)
 {
-       kblockd_schedule_delayed_work(&q->requeue_work,
-                                     msecs_to_jiffies(msecs));
+       kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
+                                   msecs_to_jiffies(msecs));
 }
 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
 
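Switching from kblockd_schedule_delayed_work() to kblockd_mod_delayed_work_on() changes the behavior when a kick is already pending: queue_delayed_work() is a no-op for pending work, so a second call asking for a shorter delay would be silently lost, whereas mod_delayed_work_on() re-arms the timer to the new expiry. A sketch of the difference using the generic workqueue API (wq stands in for the kblockd workqueue, which is private to block/blk-core.c):

	queue_delayed_work(wq, &q->requeue_work, HZ);	/* fires in ~1s */
	queue_delayed_work(wq, &q->requeue_work, 1);	/* already pending: ignored */

	mod_delayed_work_on(WORK_CPU_UNBOUND, wq,
			    &q->requeue_work, 1);	/* re-armed: fires in 1 jiffy */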
@@ -1353,6 +1357,22 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
        blk_mq_hctx_mark_pending(hctx, ctx);
 }
 
+/*
+ * Should only be used carefully, when the caller knows we want to
+ * bypass a potential IO scheduler on the target device.
+ */
+void blk_mq_request_bypass_insert(struct request *rq)
+{
+       struct blk_mq_ctx *ctx = rq->mq_ctx;
+       struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);
+
+       spin_lock(&hctx->lock);
+       list_add_tail(&rq->queuelist, &hctx->dispatch);
+       spin_unlock(&hctx->lock);
+
+       blk_mq_run_hw_queue(hctx, false);
+}
+
 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
                            struct list_head *list)
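blk_mq_request_bypass_insert() puts the request straight onto hctx->dispatch under hctx->lock, so it never passes through a scheduler's insert hook; the asynchronous blk_mq_run_hw_queue() call then gets it issued. This works because the dispatch side always drains hctx->dispatch before consulting the elevator, roughly as follows (simplified from blk_mq_sched_dispatch_requests() in this tree):

	LIST_HEAD(rq_list);

	/* bypassed requests are spliced off first, under the same lock */
	spin_lock(&hctx->lock);
	list_splice_init(&hctx->dispatch, &rq_list);
	spin_unlock(&hctx->lock);

	blk_mq_dispatch_rq_list(q, &rq_list);	/* issued ahead of the elevator */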