s390/scm: use multiple queues
author     Sebastian Ott <sebott@linux.vnet.ibm.com>
           Fri, 24 Feb 2017 16:50:17 +0000 (17:50 +0100)
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>
           Mon, 12 Jun 2017 14:25:56 +0000 (16:25 +0200)
Exploit multiple hardware contexts (queues) that can process
requests in parallel.

Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
drivers/s390/block/scm_blk.c
drivers/s390/block/scm_blk.h
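
For orientation before the hunks, here is a minimal sketch of the blk-mq pattern this patch adopts: each hardware context (hctx) carries its own driver_data, allocated in .init_hctx, guarded by a per-context lock, and freed in .exit_hctx, while nr_hw_queues > 1 lets the block layer dispatch on several contexts in parallel. Everything named example_* below, and the numeric values, are hypothetical illustrations against the pre-4.13 blk-mq API used by this commit, not code from the patch itself.

#include <linux/blk-mq.h>
#include <linux/numa.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical per-hctx state, playing the role of struct scm_queue below. */
struct example_queue {
        spinlock_t lock;        /* serializes submitters on this hctx only */
};

static int example_queue_rq(struct blk_mq_hw_ctx *hctx,
                            const struct blk_mq_queue_data *qd)
{
        struct example_queue *eq = hctx->driver_data;

        spin_lock(&eq->lock);   /* no contention across hardware contexts */
        blk_mq_start_request(qd->rq);
        /* ... drive the hardware; the request completes asynchronously ... */
        spin_unlock(&eq->lock);
        return BLK_MQ_RQ_QUEUE_OK;
}

static int example_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                             unsigned int idx)
{
        struct example_queue *eq = kzalloc(sizeof(*eq), GFP_KERNEL);

        if (!eq)
                return -ENOMEM;
        spin_lock_init(&eq->lock);
        hctx->driver_data = eq; /* state private to this hardware context */
        return 0;
}

static void example_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
{
        kfree(hctx->driver_data);
        hctx->driver_data = NULL;
}

static const struct blk_mq_ops example_mq_ops = {
        .queue_rq  = example_queue_rq,
        .init_hctx = example_init_hctx,
        .exit_hctx = example_exit_hctx,
};

static int example_setup(struct blk_mq_tag_set *set)
{
        set->ops = &example_mq_ops;
        set->nr_hw_queues = 4;  /* hypothetical: >1 enables parallel contexts */
        set->queue_depth = 64;
        set->numa_node = NUMA_NO_NODE;
        set->flags = BLK_MQ_F_SHOULD_MERGE;
        return blk_mq_alloc_tag_set(set);
}

The hunks below follow the same shape: struct scm_queue is the per-context state, and the former bdev->rq_lock, which funneled every submission through a single lock, becomes the per-context sq->lock, so submissions on different hardware contexts no longer contend.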

diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index bba798c699f181a91cec13e7b34d9790fe6ebe26..725f912fab41c286e9b16775c9d12522767b58ca 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -273,30 +273,36 @@ static void scm_request_start(struct scm_request *scmrq)
        }
 }
 
+struct scm_queue {
+       struct scm_request *scmrq;
+       spinlock_t lock;
+};
+
 static int scm_blk_request(struct blk_mq_hw_ctx *hctx,
                           const struct blk_mq_queue_data *qd)
 {
        struct scm_device *scmdev = hctx->queue->queuedata;
        struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
+       struct scm_queue *sq = hctx->driver_data;
        struct request *req = qd->rq;
        struct scm_request *scmrq;
 
-       spin_lock(&bdev->rq_lock);
+       spin_lock(&sq->lock);
        if (!scm_permit_request(bdev, req)) {
-               spin_unlock(&bdev->rq_lock);
+               spin_unlock(&sq->lock);
                return BLK_MQ_RQ_QUEUE_BUSY;
        }
 
-       scmrq = hctx->driver_data;
+       scmrq = sq->scmrq;
        if (!scmrq) {
                scmrq = scm_request_fetch();
                if (!scmrq) {
                        SCM_LOG(5, "no request");
-                       spin_unlock(&bdev->rq_lock);
+                       spin_unlock(&sq->lock);
                        return BLK_MQ_RQ_QUEUE_BUSY;
                }
                scm_request_init(bdev, scmrq);
-               hctx->driver_data = scmrq;
+               sq->scmrq = scmrq;
        }
        scm_request_set(scmrq, req);
 
@@ -307,20 +313,43 @@ static int scm_blk_request(struct blk_mq_hw_ctx *hctx,
                if (scmrq->aob->request.msb_count)
                        scm_request_start(scmrq);
 
-               hctx->driver_data = NULL;
-               spin_unlock(&bdev->rq_lock);
+               sq->scmrq = NULL;
+               spin_unlock(&sq->lock);
                return BLK_MQ_RQ_QUEUE_BUSY;
        }
        blk_mq_start_request(req);
 
        if (qd->last || scmrq->aob->request.msb_count == nr_requests_per_io) {
                scm_request_start(scmrq);
-               hctx->driver_data = NULL;
+               sq->scmrq = NULL;
        }
-       spin_unlock(&bdev->rq_lock);
+       spin_unlock(&sq->lock);
        return BLK_MQ_RQ_QUEUE_OK;
 }
 
+static int scm_blk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+                            unsigned int idx)
+{
+       struct scm_queue *qd = kzalloc(sizeof(*qd), GFP_KERNEL);
+
+       if (!qd)
+               return -ENOMEM;
+
+       spin_lock_init(&qd->lock);
+       hctx->driver_data = qd;
+
+       return 0;
+}
+
+static void scm_blk_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
+{
+       struct scm_queue *qd = hctx->driver_data;
+
+       WARN_ON(qd->scmrq);
+       kfree(hctx->driver_data);
+       hctx->driver_data = NULL;
+}
+
 static void __scmrq_log_error(struct scm_request *scmrq)
 {
        struct aob *aob = scmrq->aob;
@@ -396,6 +425,8 @@ static const struct block_device_operations scm_blk_devops = {
 static const struct blk_mq_ops scm_mq_ops = {
        .queue_rq = scm_blk_request,
        .complete = scm_blk_request_done,
+       .init_hctx = scm_blk_init_hctx,
+       .exit_hctx = scm_blk_exit_hctx,
 };
 
 int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
@@ -413,12 +444,11 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
 
        bdev->scmdev = scmdev;
        bdev->state = SCM_OPER;
-       spin_lock_init(&bdev->rq_lock);
        spin_lock_init(&bdev->lock);
        atomic_set(&bdev->queued_reqs, 0);
 
        bdev->tag_set.ops = &scm_mq_ops;
-       bdev->tag_set.nr_hw_queues = 1;
+       bdev->tag_set.nr_hw_queues = nr_requests;
        bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
        bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
index f7b4d9ba43d13130f83d9c44b83bd89bd61ffbe1..242d17a91920acdf545694baef63956a717d2098 100644
--- a/drivers/s390/block/scm_blk.h
+++ b/drivers/s390/block/scm_blk.h
@@ -19,8 +19,7 @@ struct scm_blk_dev {
        struct gendisk *gendisk;
        struct blk_mq_tag_set tag_set;
        struct scm_device *scmdev;
-       spinlock_t rq_lock;     /* guard the request queue */
-       spinlock_t lock;        /* guard the rest of the blockdev */
+       spinlock_t lock;
        atomic_t queued_reqs;
        enum {SCM_OPER, SCM_WR_PROHIBIT} state;
        struct list_head finished_requests;