revert buggy NVMe setup commit
From af220b3adff164d1b8b89d7d5c8bb741d6195012 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= <f.gruenbichler@proxmox.com>
Date: Thu, 19 Jan 2017 15:19:46 +0100
Subject: [PATCH] Revert "UBUNTU: SAUCE: (no-up) NVMe: only setup MSIX once"
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This reverts commit 96fce9e4025b96b08bfe5196d3380ab9215cb64b.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 drivers/nvme/host/pci.c | 73 ++++++++++++++++++++++++++++++++++---------------
 1 file changed, 51 insertions(+), 22 deletions(-)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index ae1f695..b9cf5aa 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1613,7 +1613,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	int result, i, vecs, nr_io_queues, size;
 
-	nr_io_queues = dev->max_qid + 1;
+	nr_io_queues = num_possible_cpus();
 	result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
 	if (result < 0)
 		return result;
@@ -1653,7 +1653,45 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 		adminq->q_db = dev->dbs;
 	}
 
-	dev->max_qid = nr_io_queues - 1;
+	/* Deregister the admin queue's interrupt */
+	free_irq(dev->entry[0].vector, adminq);
+
+	/*
+	 * If we enable msix early due to not intx, disable it again before
+	 * setting up the full range we need.
+	 */
+	if (pdev->msi_enabled)
+		pci_disable_msi(pdev);
+	else if (pdev->msix_enabled)
+		pci_disable_msix(pdev);
+
+	for (i = 0; i < nr_io_queues; i++)
+		dev->entry[i].entry = i;
+	vecs = pci_enable_msix_range(pdev, dev->entry, 1, nr_io_queues);
+	if (vecs < 0) {
+		vecs = pci_enable_msi_range(pdev, 1, min(nr_io_queues, 32));
+		if (vecs < 0) {
+			vecs = 1;
+		} else {
+			for (i = 0; i < vecs; i++)
+				dev->entry[i].vector = i + pdev->irq;
+		}
+	}
+
+	/*
+	 * Should investigate if there's a performance win from allocating
+	 * more queues than interrupt vectors; it might allow the submission
+	 * path to scale better, even if the receive path is limited by the
+	 * number of interrupts.
+	 */
+	nr_io_queues = vecs;
+	dev->max_qid = nr_io_queues;
+
+	result = queue_request_irq(dev, adminq, adminq->irqname);
+	if (result) {
+		adminq->cq_vector = -1;
+		goto free_queues;
+	}
 
 	/* Free previously allocated queues that are no longer usable */
 	nvme_free_queues(dev, nr_io_queues + 1);
@@ -1806,7 +1844,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
 static int nvme_pci_enable(struct nvme_dev *dev)
 {
 	u64 cap;
-	int result = -ENOMEM, nr_io_queues, i, vecs;
+	int result = -ENOMEM;
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 
 	if (pci_enable_device_mem(pdev))
@@ -1823,30 +1861,21 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 		goto disable;
 	}
 
-	nr_io_queues = num_possible_cpus();
-
-	for (i = 0; i < nr_io_queues; i++)
-		dev->entry[i].entry = i;
-	vecs = pci_enable_msix_range(pdev, dev->entry, 1, nr_io_queues);
-	if (vecs < 0) {
-		vecs = pci_enable_msi_range(pdev, 1, min(nr_io_queues, 32));
-		if (vecs < 0) {
-			result = vecs;
-			goto disable;
-		} else {
-			for (i = 0; i < vecs; i++)
-				dev->entry[i].vector = i + pdev->irq;
-		}
+	/*
+	 * Some devices and/or platforms don't advertise or work with INTx
+	 * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
+	 * adjust this later.
+	 */
+	if (pci_enable_msix(pdev, dev->entry, 1)) {
+		pci_enable_msi(pdev);
+		dev->entry[0].vector = pdev->irq;
 	}
 
-	if (vecs < 2) {
-		dev_err(dev->ctrl.device, "Failed to get enough MSI/MSIX interrupts\n");
-		result = -ENOSPC;
+	if (!dev->entry[0].vector) {
+		result = -ENODEV;
 		goto disable;
 	}
 
-	dev->max_qid = vecs - 1;
-
 	cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
 
 	dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
-- 
2.1.4
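
For reference, the following is a minimal sketch, not part of the patch and not applied anywhere, of the vector-allocation order that the restored nvme_setup_io_queues() code follows after the provisional single vector from nvme_pci_enable() has been torn down: try MSI-X with one vector per requested I/O queue, fall back to plain MSI capped at 32 vectors, and finally fall back to a single vector. The helper name nvme_alloc_io_vectors() and its argument list are hypothetical; the pci_* calls are the ones used in the diff above (kernels of this vintage, before pci_alloc_irq_vectors() existed).

#include <linux/pci.h>		/* pci_enable_msix_range(), pci_enable_msi_range() */
#include <linux/kernel.h>	/* min() */

/*
 * Hypothetical helper, for illustration only: mirrors the MSI-X -> MSI ->
 * single-vector fallback restored by this revert. 'entry' is assumed to be
 * an array of struct msix_entry sized for at least 'want' vectors, as in
 * the driver. Returns the number of vectors actually usable (at least 1).
 */
static int nvme_alloc_io_vectors(struct pci_dev *pdev,
				 struct msix_entry *entry, int want)
{
	int i, vecs;

	/* First choice: one MSI-X vector per requested I/O queue. */
	for (i = 0; i < want; i++)
		entry[i].entry = i;
	vecs = pci_enable_msix_range(pdev, entry, 1, want);
	if (vecs > 0)
		return vecs;

	/* Second choice: plain MSI, which this driver caps at 32 vectors. */
	vecs = pci_enable_msi_range(pdev, 1, min(want, 32));
	if (vecs > 0) {
		/* MSI vectors are consecutive, starting at pdev->irq. */
		for (i = 0; i < vecs; i++)
			entry[i].vector = i + pdev->irq;
		return vecs;
	}

	/* Last resort: keep using the single pre-enabled (or INTx) vector. */
	return 1;
}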