scsi: hpsa: fix selection of reply queue
author    Ming Lei <ming.lei@redhat.com>
          Tue, 15 May 2018 13:03:20 +0000 (10:03 -0300)
committer Khalid Elmously <khalid.elmously@canonical.com>
          Tue, 12 Jun 2018 06:28:55 +0000 (02:28 -0400)
BugLink: http://bugs.launchpad.net/bugs/1759723
Since commit 84676c1f21e8 ("genirq/affinity: assign vectors to all
possible CPUs") we could end up with an MSI-X vector that did not have
any online CPUs mapped. This would lead to I/O hangs since there was no
CPU to receive the completion.

Retrieve IRQ affinity information using pci_irq_get_affinity() and use
this mapping to choose a reply queue.
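In outline: at probe time the driver now records, for every possible CPU, the
reply queue whose MSI-X vector is affine to that CPU, and every submission
consults that table. A minimal sketch of the pattern, using the names the diff
below introduces (the NULL-mask fallback is omitted here but handled in the
real hpsa_setup_reply_map()):

    /* probe: the IRQ core knows queue -> CPUs; invert it to CPU -> queue */
    for (queue = 0; queue < h->msix_vectors; queue++)
            for_each_cpu(cpu, pci_irq_get_affinity(h->pdev, queue))
                    h->reply_map[cpu] = queue;

    /* submit: the chosen queue's vector is affine to the submitting
     * CPU, so there is always a CPU able to take the completion */
    reply_queue = h->reply_map[raw_smp_processor_id()];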

[mkp: tweaked commit desc]

Cc: Hannes Reinecke <hare@suse.de>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>,
Cc: James Bottomley <james.bottomley@hansenpartnership.com>,
Cc: Christoph Hellwig <hch@lst.de>,
Cc: Don Brace <don.brace@microsemi.com>
Cc: Kashyap Desai <kashyap.desai@broadcom.com>
Cc: Laurence Oberman <loberman@redhat.com>
Cc: Meelis Roos <mroos@linux.ee>
Cc: Artem Bityutskiy <artem.bityutskiy@intel.com>
Cc: Mike Snitzer <snitzer@redhat.com>
Fixes: 84676c1f21e8 ("genirq/affinity: assign vectors to all possible CPUs")
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Tested-by: Laurence Oberman <loberman@redhat.com>
Tested-by: Don Brace <don.brace@microsemi.com>
Tested-by: Artem Bityutskiy <artem.bityutskiy@intel.com>
Acked-by: Don Brace <don.brace@microsemi.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
(cherry picked from commit 8b834bff1b73dce46f4e9f5e84af6f73fed8b0ef)
Signed-off-by: Jose Ricardo Ziviani <joserz@linux.ibm.com>
Acked-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
Acked-by: Stefan Bader <stefan.bader@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
drivers/scsi/hpsa.c
drivers/scsi/hpsa.h

drivers/scsi/hpsa.c
index b54d17be6d01de6915e517d7471c28bcb983e84a..211975cc28c978952eaf29a6b5efb7d1e34169a5 100644
@@ -1045,11 +1045,7 @@ static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
                c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
                if (unlikely(!h->msix_vectors))
                        return;
-               if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
-                       c->Header.ReplyQueue =
-                               raw_smp_processor_id() % h->nreply_queues;
-               else
-                       c->Header.ReplyQueue = reply_queue % h->nreply_queues;
+               c->Header.ReplyQueue = reply_queue;
        }
 }
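The same modulo-selection pattern is removed from all four submission paths
below; the modulo was the failure mode. Once commit 84676c1f21e8 spread
vectors across all *possible* CPUs, `cpu % h->nreply_queues` could pick a
queue whose vector is affine only to offline CPUs. A worked example on a
hypothetical topology:

    /* 8 possible CPUs, only 0-3 online; 4 reply queues spread as:
     *   queue 0 -> {0,1}   queue 1 -> {2,3}
     *   queue 2 -> {4,5}   queue 3 -> {6,7}   (2 and 3: offline CPUs only)
     * old: CPU 2 submits, 2 % 4 = 2 -> queue 2's vector has no online
     *      CPU to fire on, the completion is never seen -> I/O hang
     * new: reply_map[2] = 1 -> queue 1's vector targets CPU 2 or 3 */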
 
@@ -1063,10 +1059,7 @@ static void set_ioaccel1_performant_mode(struct ctlr_info *h,
         * Tell the controller to post the reply to the queue for this
         * processor.  This seems to give the best I/O throughput.
         */
-       if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
-               cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
-       else
-               cp->ReplyQueue = reply_queue % h->nreply_queues;
+       cp->ReplyQueue = reply_queue;
        /*
         * Set the bits in the address sent down to include:
         *  - performant mode bit (bit 0)
@@ -1087,10 +1080,7 @@ static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
        /* Tell the controller to post the reply to the queue for this
         * processor.  This seems to give the best I/O throughput.
         */
-       if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
-               cp->reply_queue = smp_processor_id() % h->nreply_queues;
-       else
-               cp->reply_queue = reply_queue % h->nreply_queues;
+       cp->reply_queue = reply_queue;
        /* Set the bits in the address sent down to include:
         *  - performant mode bit not used in ioaccel mode 2
         *  - pull count (bits 0-3)
@@ -1109,10 +1099,7 @@ static void set_ioaccel2_performant_mode(struct ctlr_info *h,
         * Tell the controller to post the reply to the queue for this
         * processor.  This seems to give the best I/O throughput.
         */
-       if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
-               cp->reply_queue = smp_processor_id() % h->nreply_queues;
-       else
-               cp->reply_queue = reply_queue % h->nreply_queues;
+       cp->reply_queue = reply_queue;
        /*
         * Set the bits in the address sent down to include:
         *  - performant mode bit not used in ioaccel mode 2
@@ -1157,6 +1144,8 @@ static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
 {
        dial_down_lockup_detection_during_fw_flash(h, c);
        atomic_inc(&h->commands_outstanding);
+
+       reply_queue = h->reply_map[raw_smp_processor_id()];
        switch (c->cmd_type) {
        case CMD_IOACCEL1:
                set_ioaccel1_performant_mode(h, c, reply_queue);
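Note the lookup uses raw_smp_processor_id(): the task may be preempted and
migrated right after the read, but since every entry in reply_map names a
serviceable queue, a stale CPU number is merely suboptimal, not incorrect, so
no preempt_disable() bracket is needed:

    /* harmless if we migrate right after this line: every entry in
     * reply_map points at a queue with a live vector (or queue 0) */
    reply_queue = h->reply_map[raw_smp_processor_id()];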
@@ -7384,6 +7373,26 @@ static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
        h->msix_vectors = 0;
 }
 
+static void hpsa_setup_reply_map(struct ctlr_info *h)
+{
+       const struct cpumask *mask;
+       unsigned int queue, cpu;
+
+       for (queue = 0; queue < h->msix_vectors; queue++) {
+               mask = pci_irq_get_affinity(h->pdev, queue);
+               if (!mask)
+                       goto fallback;
+
+               for_each_cpu(cpu, mask)
+                       h->reply_map[cpu] = queue;
+       }
+       return;
+
+fallback:
+       for_each_possible_cpu(cpu)
+               h->reply_map[cpu] = 0;
+}
+
 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
  * controllers that are capable. If not, we use legacy INTx mode.
  */
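hpsa_setup_reply_map() degrades safely: if pci_irq_get_affinity() returns NULL
for any vector (for instance when the controller fell back to a single MSI or
INTx interrupt and no affinity spreading took place), every CPU is pointed at
queue 0, which always exists. A small userspace model of the same inversion,
with hypothetical bitmask constants standing in for the affinity masks the IRQ
core would hand back:

    #include <stdio.h>

    #define NR_CPUS   8
    #define NR_QUEUES 4

    /* hypothetical per-queue affinity: bit i set => queue serves CPU i */
    static const unsigned int affinity[NR_QUEUES] = {
            0x03, 0x0c, 0x30, 0xc0,    /* {0,1} {2,3} {4,5} {6,7} */
    };

    int main(void)
    {
            unsigned int reply_map[NR_CPUS] = { 0 };  /* fallback: queue 0 */
            unsigned int queue, cpu;

            /* invert queue -> CPUs into CPU -> queue, as the driver does */
            for (queue = 0; queue < NR_QUEUES; queue++)
                    for (cpu = 0; cpu < NR_CPUS; cpu++)
                            if (affinity[queue] & (1u << cpu))
                                    reply_map[cpu] = queue;

            for (cpu = 0; cpu < NR_CPUS; cpu++)
                    printf("cpu %u -> reply queue %u\n", cpu, reply_map[cpu]);
            return 0;
    }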
@@ -7779,6 +7788,10 @@ static int hpsa_pci_init(struct ctlr_info *h)
        err = hpsa_interrupt_mode(h);
        if (err)
                goto clean1;
+
+       /* setup mapping between CPU and reply queue */
+       hpsa_setup_reply_map(h);
+
        err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
        if (err)
                goto clean2;    /* intmode+region, pci */
@@ -8490,6 +8503,28 @@ static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
        return wq;
 }
 
+static void hpda_free_ctlr_info(struct ctlr_info *h)
+{
+       kfree(h->reply_map);
+       kfree(h);
+}
+
+static struct ctlr_info *hpda_alloc_ctlr_info(void)
+{
+       struct ctlr_info *h;
+
+       h = kzalloc(sizeof(*h), GFP_KERNEL);
+       if (!h)
+               return NULL;
+
+       h->reply_map = kzalloc(sizeof(*h->reply_map) * nr_cpu_ids, GFP_KERNEL);
+       if (!h->reply_map) {
+               kfree(h);
+               return NULL;
+       }
+       return h;
+}
+
 static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        int dac, rc;
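One remark on the allocation in the hunk above: the hand-written
`sizeof(*h->reply_map) * nr_cpu_ids` multiplication is what the commit ships;
the overflow-checked idiom for the same allocation would be:

    h->reply_map = kcalloc(nr_cpu_ids, sizeof(*h->reply_map), GFP_KERNEL);

(nr_cpu_ids is small enough that the difference is stylistic here, not a real
overflow risk.)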
@@ -8527,7 +8562,7 @@ reinit_after_soft_reset:
         * the driver.  See comments in hpsa.h for more info.
         */
        BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
-       h = kzalloc(sizeof(*h), GFP_KERNEL);
+       h = hpda_alloc_ctlr_info();
        if (!h) {
                dev_err(&pdev->dev, "Failed to allocate controller head\n");
                return -ENOMEM;
@@ -8926,7 +8961,7 @@ static void hpsa_remove_one(struct pci_dev *pdev)
        h->lockup_detected = NULL;                      /* init_one 2 */
        /* (void) pci_disable_pcie_error_reporting(pdev); */    /* init_one 1 */
 
-       kfree(h);                                       /* init_one 1 */
+       hpda_free_ctlr_info(h);                         /* init_one 1 */
 }
 
 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
drivers/scsi/hpsa.h
index 018f980a701ce406e9edf0119240c39df47b9858..fb9f5e7f8209447771d07016bca7924774b143af 100644
@@ -158,6 +158,7 @@ struct bmic_controller_parameters {
 #pragma pack()
 
 struct ctlr_info {
+       unsigned int *reply_map;
        int     ctlr;
        char    devname[8];
        char    *product_name;