git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/commitdiff
NVMe: Use unbounded work queue for all work
author: Keith Busch <keith.busch@intel.com>
Fri, 23 Oct 2015 17:42:02 +0000 (11:42 -0600)
committer: Tim Gardner <tim.gardner@canonical.com>
Mon, 29 Feb 2016 15:59:12 +0000 (08:59 -0700)
BugLink: http://bugs.launchpad.net/bugs/1531539
Removes all usage of the global work queue so work can't be
scheduled on two different work queues, and removes nvme's work queue
singlethreadedness so controllers can be driven in parallel.

Signed-off-by: Keith Busch <keith.busch@intel.com>
[hch: keep the dead controller removal on the system workqueue to avoid
 deadlocks]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
(cherry picked from commit 92f7a1624bbc2361b96db81de89aee1baae40da9)
Signed-off-by: Tim Gardner <tim.gardner@canonical.com>
drivers/nvme/host/pci.c

index eace85b3b43982562c81685d4bf78b66a232814b..06917f62e931972e7f35198773672dda9a99d718 100644 (file)
@@ -371,7 +371,7 @@ static void async_req_completion(struct nvme_queue *nvmeq, void *ctx,
        switch (result & 0xff07) {
        case NVME_AER_NOTICE_NS_CHANGED:
                dev_info(nvmeq->q_dmadev, "rescanning\n");
-               schedule_work(&nvmeq->dev->scan_work);
+               queue_work(nvme_workq, &nvmeq->dev->scan_work);
        default:
                dev_warn(nvmeq->q_dmadev, "async event result %08x\n", result);
        }
@@ -1782,7 +1782,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
                        return 0;
                dev->ctrl.tagset = &dev->tagset;
        }
-       schedule_work(&dev->scan_work);
+       queue_work(nvme_workq, &dev->scan_work);
        return 0;
 }
 
@@ -2343,7 +2343,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (result)
                goto release_pools;
 
-       schedule_work(&dev->reset_work);
+       queue_work(nvme_workq, &dev->reset_work);
        return 0;
 
  release_pools:
@@ -2364,7 +2364,7 @@ static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
        if (prepare)
                nvme_dev_shutdown(dev);
        else
-               schedule_work(&dev->reset_work);
+               queue_work(nvme_workq, &dev->reset_work);
 }
 
 static void nvme_shutdown(struct pci_dev *pdev)
@@ -2415,7 +2415,7 @@ static int nvme_resume(struct device *dev)
        struct pci_dev *pdev = to_pci_dev(dev);
        struct nvme_dev *ndev = pci_get_drvdata(pdev);
 
-       schedule_work(&ndev->reset_work);
+       queue_work(nvme_workq, &ndev->reset_work);
        return 0;
 }
 #endif
@@ -2463,7 +2463,7 @@ static int __init nvme_init(void)
 
        init_waitqueue_head(&nvme_kthread_wait);
 
-       nvme_workq = create_singlethread_workqueue("nvme");
+       nvme_workq = alloc_workqueue("nvme", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
        if (!nvme_workq)
                return -ENOMEM;