scsi: cxlflash: Combine the send queue locks
author    Uma Krishnan <ukrishn@linux.vnet.ibm.com>
          Fri, 7 Jul 2017 16:05:39 +0000 (13:05 -0300)
committer Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
          Fri, 14 Jul 2017 14:33:14 +0000 (11:33 -0300)
BugLink: http://bugs.launchpad.net/bugs/1702521
Currently there are separate spin locks for the two supported I/O queueing
models. This makes it difficult to serialize the enqueue path with code
paths outside of it.

As a design simplification, and to support serialization with enqueue
operations, move to a single lock that is used for enqueueing regardless
of the queueing model.
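
To illustrate the effect of the change, below is a minimal, compilable
userspace C sketch of the before/after locking model. Every name in it
(hwq_demo, enqueue_ioarrin, enqueue_sq, quiesce) is a hypothetical
stand-in rather than the driver's real code; the point is only that once
both queueing models take the same lock, a path outside the enqueue path
can serialize against all enqueues by taking that single lock.

	/*
	 * Hypothetical userspace model of the cxlflash hwq locking change.
	 * Before this commit: the IOARRIN-style enqueue took rrin_slock
	 * while the SQ-style enqueue took hsq_slock, so no single lock
	 * covered both. After: one hsq_slock covers every enqueue path.
	 */
	#include <pthread.h>
	#include <stdio.h>

	struct hwq_demo {
		pthread_mutex_t hsq_slock;	/* single send queue lock (after) */
		int room;			/* IOARRIN command room credits */
		int sq_credits;			/* SQ entry credits */
	};

	/* Legacy IOARRIN-style enqueue: formerly guarded by rrin_slock. */
	static void enqueue_ioarrin(struct hwq_demo *hwq)
	{
		pthread_mutex_lock(&hwq->hsq_slock);
		hwq->room--;			/* refreshed via MMIO in the driver */
		pthread_mutex_unlock(&hwq->hsq_slock);
	}

	/* SQ-style enqueue: was already guarded by hsq_slock. */
	static void enqueue_sq(struct hwq_demo *hwq)
	{
		pthread_mutex_lock(&hwq->hsq_slock);
		hwq->sq_credits--;
		pthread_mutex_unlock(&hwq->hsq_slock);
	}

	/*
	 * A path outside the enqueue path (e.g. a reset) can now serialize
	 * against both queueing models by holding the one lock.
	 */
	static void quiesce(struct hwq_demo *hwq)
	{
		pthread_mutex_lock(&hwq->hsq_slock);
		/* no command can be enqueued while this is held */
		pthread_mutex_unlock(&hwq->hsq_slock);
	}

	int main(void)
	{
		struct hwq_demo hwq = { .room = 16, .sq_credits = 16 };

		pthread_mutex_init(&hwq.hsq_slock, NULL);
		enqueue_ioarrin(&hwq);
		enqueue_sq(&hwq);
		quiesce(&hwq);
		printf("room=%d sq_credits=%d\n", hwq.room, hwq.sq_credits);
		return 0;
	}

In the actual patch this shows up as exactly that consolidation:
send_cmd_ioarrin() switches from rrin_slock to hsq_slock, hsq_slock is
initialized unconditionally in start_afu(), and rrin_slock is removed.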

Signed-off-by: Uma Krishnan <ukrishn@linux.vnet.ibm.com>
Acked-by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
(cherry picked from commit 66ea9bcc392017b6df465b6f5847f6eac966a801)
Signed-off-by: Victor Aoqui <victora@linux.vnet.ibm.com>
Acked-by: Stefan Bader <stefan.bader@canonical.com>
Acked-by: Seth Forshee <seth.forshee@canonical.com>
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
drivers/scsi/cxlflash/common.h
drivers/scsi/cxlflash/main.c

diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
index 256af819377d19a9a750528b926ab91dd6bf100e..6fc32cfc6026928728fc9b1dac23b1069eee0814 100644
--- a/drivers/scsi/cxlflash/common.h
+++ b/drivers/scsi/cxlflash/common.h
@@ -193,7 +193,7 @@ struct hwq {
        u32 index;              /* Index of this hwq */
 
        atomic_t hsq_credits;
-       spinlock_t hsq_slock;
+       spinlock_t hsq_slock;   /* Hardware send queue lock */
        struct sisl_ioarcb *hsq_start;
        struct sisl_ioarcb *hsq_end;
        struct sisl_ioarcb *hsq_curr;
@@ -204,7 +204,6 @@ struct hwq {
        bool toggle;
 
        s64 room;
-       spinlock_t rrin_slock; /* Lock to rrin queuing and cmd_room updates */
 
        struct irq_poll irqpoll;
 } __aligned(cache_line_size());
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index a7d57c34349216932cd368caeffb47aecb4a117f..64ea597ca98e7f97dde3c2d9c2500e9d705a859f 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -261,7 +261,7 @@ static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
         * To avoid the performance penalty of MMIO, spread the update of
         * 'room' over multiple commands.
         */
-       spin_lock_irqsave(&hwq->rrin_slock, lock_flags);
+       spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
        if (--hwq->room < 0) {
                room = readq_be(&hwq->host_map->cmd_room);
                if (room <= 0) {
@@ -277,7 +277,7 @@ static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
 
        writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
 out:
-       spin_unlock_irqrestore(&hwq->rrin_slock, lock_flags);
+       spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
        dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", __func__,
                cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
        return rc;
@@ -1722,7 +1722,10 @@ static int start_afu(struct cxlflash_cfg *cfg)
                hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1];
                hwq->hrrq_curr = hwq->hrrq_start;
                hwq->toggle = 1;
+
+               /* Initialize spin locks */
                spin_lock_init(&hwq->hrrq_slock);
+               spin_lock_init(&hwq->hsq_slock);
 
                /* Initialize SQ */
                if (afu_is_sq_cmd_mode(afu)) {
@@ -1731,7 +1734,6 @@ static int start_afu(struct cxlflash_cfg *cfg)
                        hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1];
                        hwq->hsq_curr = hwq->hsq_start;
 
-                       spin_lock_init(&hwq->hsq_slock);
                        atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1);
                }
 
@@ -1984,7 +1986,6 @@ static int init_afu(struct cxlflash_cfg *cfg)
        for (i = 0; i < afu->num_hwqs; i++) {
                hwq = get_hwq(afu, i);
 
-               spin_lock_init(&hwq->rrin_slock);
                hwq->room = readq_be(&hwq->host_map->cmd_room);
        }