scsi: virtio_scsi: fix IO hang caused by automatic irq vector affinity
author Ming Lei <ming.lei@redhat.com>
Tue, 15 May 2018 13:03:23 +0000 (10:03 -0300)
committer Khalid Elmously <khalid.elmously@canonical.com>
Tue, 12 Jun 2018 06:28:55 +0000 (02:28 -0400)
BugLink: http://bugs.launchpad.net/bugs/1759723
Since commit 84676c1f21e8ff5 ("genirq/affinity: assign vectors to all
possible CPUs") it is possible to end up in a scenario where only
offline CPUs are mapped to an interrupt vector.

This is only an issue for the legacy I/O path since with blk-mq/scsi-mq
an I/O can't be submitted to a hardware queue if the queue isn't mapped
to an online CPU.

Fix this issue by forcing virtio-scsi to use blk-mq.
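For context, a minimal standalone C sketch (illustration only, not kernel code; NUM_QUEUES, the helper names, and the example CPU/tag values are made up) contrasting the two queue-selection strategies visible in the diff below. The removed legacy path derives the request virtqueue from the submitting CPU, so once interrupt vectors are spread over all possible CPUs a request can land on a virtqueue whose completion vector targets only offline CPUs; the retained blk-mq path derives it from the request's hardware-queue tag, and blk-mq only dispatches on hardware queues mapped to online CPUs.

/*
 * Standalone illustration only -- not kernel code.  NUM_QUEUES and the
 * example CPU number / tag value are invented for this sketch.
 */
#include <stdio.h>

#define NUM_QUEUES 4	/* pretend the device exposes 4 request virtqueues */

/*
 * Legacy path (removed by this patch): follow the submitting CPU, wrapped
 * into the virtqueue range.  Nothing guarantees the chosen queue's
 * completion vector is affine to an online CPU.
 */
static int pick_vq_legacy(int submitting_cpu)
{
	int queue_num = submitting_cpu;

	while (queue_num >= NUM_QUEUES)
		queue_num -= NUM_QUEUES;
	return queue_num;
}

/*
 * blk-mq path (kept): the hardware-queue index is encoded in the upper
 * bits of the request's unique tag, mirroring blk_mq_unique_tag_to_hwq().
 */
static int pick_vq_mq(unsigned int unique_tag)
{
	return unique_tag >> 16;
}

int main(void)
{
	printf("legacy: CPU 6       -> vq %d\n", pick_vq_legacy(6));
	printf("blk-mq: tag 0x20007 -> vq %d\n", pick_vq_mq(0x20007));
	return 0;
}

With .force_blk_mq set in both host templates (see the last two hunks), the legacy selection can no longer be reached, so only the blk-mq path remains in play.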

[mkp: commit desc]

Cc: Omar Sandoval <osandov@fb.com>,
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>,
Cc: James Bottomley <james.bottomley@hansenpartnership.com>,
Cc: Christoph Hellwig <hch@lst.de>,
Cc: Don Brace <don.brace@microsemi.com>
Cc: Kashyap Desai <kashyap.desai@broadcom.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: Laurence Oberman <loberman@redhat.com>
Fixes: 84676c1f21e8 ("genirq/affinity: assign vectors to all possible CPUs")
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
(cherry picked from commit b5b6e8c8d3b4cbeb447a0f10c7d5de3caa573299)
Signed-off-by: Jose Ricardo Ziviani <joserz@linux.ibm.com>
Acked-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
Acked-by: Stefan Bader <stefan.bader@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 7c28e8d4955a965805cdde3d2ddb07a7f8c5d6ea..54e3a0f6844c4c81692fe9aaa568efe1f2215866 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -91,9 +91,6 @@ struct virtio_scsi_vq {
 struct virtio_scsi_target_state {
        seqcount_t tgt_seq;
 
-       /* Count of outstanding requests. */
-       atomic_t reqs;
-
        /* Currently active virtqueue for requests sent to this target. */
        struct virtio_scsi_vq *req_vq;
 };
@@ -152,8 +149,6 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
        struct virtio_scsi_cmd *cmd = buf;
        struct scsi_cmnd *sc = cmd->sc;
        struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;
-       struct virtio_scsi_target_state *tgt =
-                               scsi_target(sc->device)->hostdata;
 
        dev_dbg(&sc->device->sdev_gendev,
                "cmd %p response %u status %#02x sense_len %u\n",
@@ -210,8 +205,6 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
        }
 
        sc->scsi_done(sc);
-
-       atomic_dec(&tgt->reqs);
 }
 
 static void virtscsi_vq_done(struct virtio_scsi *vscsi,
@@ -580,10 +573,7 @@ static int virtscsi_queuecommand_single(struct Scsi_Host *sh,
                                        struct scsi_cmnd *sc)
 {
        struct virtio_scsi *vscsi = shost_priv(sh);
-       struct virtio_scsi_target_state *tgt =
-                               scsi_target(sc->device)->hostdata;
 
-       atomic_inc(&tgt->reqs);
        return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc);
 }
 
@@ -596,55 +586,11 @@ static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
        return &vscsi->req_vqs[hwq];
 }
 
-static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,
-                                              struct virtio_scsi_target_state *tgt)
-{
-       struct virtio_scsi_vq *vq;
-       unsigned long flags;
-       u32 queue_num;
-
-       local_irq_save(flags);
-       if (atomic_inc_return(&tgt->reqs) > 1) {
-               unsigned long seq;
-
-               do {
-                       seq = read_seqcount_begin(&tgt->tgt_seq);
-                       vq = tgt->req_vq;
-               } while (read_seqcount_retry(&tgt->tgt_seq, seq));
-       } else {
-               /* no writes can be concurrent because of atomic_t */
-               write_seqcount_begin(&tgt->tgt_seq);
-
-               /* keep previous req_vq if a reader just arrived */
-               if (unlikely(atomic_read(&tgt->reqs) > 1)) {
-                       vq = tgt->req_vq;
-                       goto unlock;
-               }
-
-               queue_num = smp_processor_id();
-               while (unlikely(queue_num >= vscsi->num_queues))
-                       queue_num -= vscsi->num_queues;
-               tgt->req_vq = vq = &vscsi->req_vqs[queue_num];
- unlock:
-               write_seqcount_end(&tgt->tgt_seq);
-       }
-       local_irq_restore(flags);
-
-       return vq;
-}
-
 static int virtscsi_queuecommand_multi(struct Scsi_Host *sh,
                                       struct scsi_cmnd *sc)
 {
        struct virtio_scsi *vscsi = shost_priv(sh);
-       struct virtio_scsi_target_state *tgt =
-                               scsi_target(sc->device)->hostdata;
-       struct virtio_scsi_vq *req_vq;
-
-       if (shost_use_blk_mq(sh))
-               req_vq = virtscsi_pick_vq_mq(vscsi, sc);
-       else
-               req_vq = virtscsi_pick_vq(vscsi, tgt);
+       struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc);
 
        return virtscsi_queuecommand(vscsi, req_vq, sc);
 }
@@ -775,7 +721,6 @@ static int virtscsi_target_alloc(struct scsi_target *starget)
                return -ENOMEM;
 
        seqcount_init(&tgt->tgt_seq);
-       atomic_set(&tgt->reqs, 0);
        tgt->req_vq = &vscsi->req_vqs[0];
 
        starget->hostdata = tgt;
@@ -823,6 +768,7 @@ static struct scsi_host_template virtscsi_host_template_single = {
        .target_alloc = virtscsi_target_alloc,
        .target_destroy = virtscsi_target_destroy,
        .track_queue_depth = 1,
+       .force_blk_mq = 1,
 };
 
 static struct scsi_host_template virtscsi_host_template_multi = {
@@ -844,6 +790,7 @@ static struct scsi_host_template virtscsi_host_template_multi = {
        .target_destroy = virtscsi_target_destroy,
        .map_queues = virtscsi_map_queues,
        .track_queue_depth = 1,
+       .force_blk_mq = 1,
 };
 
 #define virtscsi_config_get(vdev, fld) \