/* drivers/fpga/dfl-pci.c */
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Driver for FPGA Device Feature List (DFL) PCIe device
4 *
5 * Copyright (C) 2017-2018 Intel Corporation, Inc.
6 *
7 * Authors:
8 * Zhang Yi <Yi.Z.Zhang@intel.com>
9 * Xiao Guangrong <guangrong.xiao@linux.intel.com>
10 * Joseph Grecco <joe.grecco@intel.com>
11 * Enno Luebbers <enno.luebbers@intel.com>
12 * Tim Whisonant <tim.whisonant@intel.com>
13 * Ananda Ravuri <ananda.ravuri@intel.com>
14 * Henry Mitchel <henry.mitchel@intel.com>
15 */
16
#include <linux/aer.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/stddef.h>
#include <linux/types.h>
24
25 #include "dfl.h"
26
27 #define DRV_VERSION "0.8"
28 #define DRV_NAME "dfl-pci"
29
30 #define PCI_VSEC_ID_INTEL_DFLS 0x43
31
32 #define PCI_VNDR_DFLS_CNT 0x8
33 #define PCI_VNDR_DFLS_RES 0xc
34
35 #define PCI_VNDR_DFLS_RES_BAR_MASK GENMASK(2, 0)
36 #define PCI_VNDR_DFLS_RES_OFF_MASK GENMASK(31, 3)
37
/* Per-PCI-device driver data, attached with pci_set_drvdata() in probe. */
struct cci_drvdata {
	struct dfl_fpga_cdev *cdev;	/* container device */
};
41
42 static void __iomem *cci_pci_ioremap_bar0(struct pci_dev *pcidev)
43 {
44 if (pcim_iomap_regions(pcidev, BIT(0), DRV_NAME))
45 return NULL;
46
47 return pcim_iomap_table(pcidev)[0];
48 }
49
50 static int cci_pci_alloc_irq(struct pci_dev *pcidev)
51 {
52 int ret, nvec = pci_msix_vec_count(pcidev);
53
54 if (nvec <= 0) {
55 dev_dbg(&pcidev->dev, "fpga interrupt not supported\n");
56 return 0;
57 }
58
59 ret = pci_alloc_irq_vectors(pcidev, nvec, nvec, PCI_IRQ_MSIX);
60 if (ret < 0)
61 return ret;
62
63 return nvec;
64 }
65
/* Release the IRQ vectors allocated by cci_pci_alloc_irq(). */
static void cci_pci_free_irq(struct pci_dev *pcidev)
{
	pci_free_irq_vectors(pcidev);
}
70
71 /* PCI Device ID */
72 #define PCIE_DEVICE_ID_PF_INT_5_X 0xBCBD
73 #define PCIE_DEVICE_ID_PF_INT_6_X 0xBCC0
74 #define PCIE_DEVICE_ID_PF_DSC_1_X 0x09C4
75 #define PCIE_DEVICE_ID_INTEL_PAC_N3000 0x0B30
76 #define PCIE_DEVICE_ID_INTEL_PAC_D5005 0x0B2B
77 /* VF Device */
78 #define PCIE_DEVICE_ID_VF_INT_5_X 0xBCBF
79 #define PCIE_DEVICE_ID_VF_INT_6_X 0xBCC1
80 #define PCIE_DEVICE_ID_VF_DSC_1_X 0x09C5
81 #define PCIE_DEVICE_ID_INTEL_PAC_D5005_VF 0x0B2C
82
83 static struct pci_device_id cci_pcie_id_tbl[] = {
84 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_5_X),},
85 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_5_X),},
86 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_6_X),},
87 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_6_X),},
88 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_DSC_1_X),},
89 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_DSC_1_X),},
90 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_N3000),},
91 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005),},
92 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005_VF),},
93 {0,}
94 };
95 MODULE_DEVICE_TABLE(pci, cci_pcie_id_tbl);
96
97 static int cci_init_drvdata(struct pci_dev *pcidev)
98 {
99 struct cci_drvdata *drvdata;
100
101 drvdata = devm_kzalloc(&pcidev->dev, sizeof(*drvdata), GFP_KERNEL);
102 if (!drvdata)
103 return -ENOMEM;
104
105 pci_set_drvdata(pcidev, drvdata);
106
107 return 0;
108 }
109
/* Remove all enumerated feature devices, then release the IRQ vectors. */
static void cci_remove_feature_devs(struct pci_dev *pcidev)
{
	struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);

	/* remove all children feature devices */
	dfl_fpga_feature_devs_remove(drvdata->cdev);
	cci_pci_free_irq(pcidev);
}
118
119 static int *cci_pci_create_irq_table(struct pci_dev *pcidev, unsigned int nvec)
120 {
121 unsigned int i;
122 int *table;
123
124 table = kcalloc(nvec, sizeof(int), GFP_KERNEL);
125 if (!table)
126 return table;
127
128 for (i = 0; i < nvec; i++)
129 table[i] = pci_irq_vector(pcidev, i);
130
131 return table;
132 }
133
/*
 * find_dfls_by_vsec - locate Device Feature Lists via the Intel DFL
 * Vendor-Specific Extended Capability.
 *
 * Walks the PCIe extended capability chain looking for an Intel VSEC with
 * ID PCI_VSEC_ID_INTEL_DFLS, then reads the DFL count and the per-DFL
 * resource dwords (BAR number in the low bits, offset in the high bits)
 * and registers each DFL region with the enumeration info.
 *
 * Return: 0 on success, -ENODEV if no DFL VSEC is present (the caller
 * falls back to find_dfls_by_default()), -EINVAL on malformed VSEC data.
 */
static int find_dfls_by_vsec(struct pci_dev *pcidev, struct dfl_fpga_enum_info *info)
{
	u32 bir, offset, vndr_hdr, dfl_cnt, dfl_res;
	int dfl_res_off, i, bars, voff = 0;
	resource_size_t start, len;

	while ((voff = pci_find_next_ext_capability(pcidev, voff, PCI_EXT_CAP_ID_VNDR))) {
		vndr_hdr = 0;
		pci_read_config_dword(pcidev, voff + PCI_VNDR_HEADER, &vndr_hdr);

		if (PCI_VNDR_HEADER_ID(vndr_hdr) == PCI_VSEC_ID_INTEL_DFLS &&
		    pcidev->vendor == PCI_VENDOR_ID_INTEL)
			break;
	}

	if (!voff) {
		dev_dbg(&pcidev->dev, "%s no DFL VSEC found\n", __func__);
		return -ENODEV;
	}

	dfl_cnt = 0;
	pci_read_config_dword(pcidev, voff + PCI_VNDR_DFLS_CNT, &dfl_cnt);
	/* at most one DFL per standard BAR */
	if (dfl_cnt > PCI_STD_NUM_BARS) {
		dev_err(&pcidev->dev, "%s too many DFLs %d > %d\n",
			__func__, dfl_cnt, PCI_STD_NUM_BARS);
		return -EINVAL;
	}

	dfl_res_off = voff + PCI_VNDR_DFLS_RES;
	/* all per-DFL resource dwords must fit in extended config space */
	if (dfl_res_off + (dfl_cnt * sizeof(u32)) > PCI_CFG_SPACE_EXP_SIZE) {
		dev_err(&pcidev->dev, "%s DFL VSEC too big for PCIe config space\n",
			__func__);
		return -EINVAL;
	}

	for (i = 0, bars = 0; i < dfl_cnt; i++, dfl_res_off += sizeof(u32)) {
		dfl_res = GENMASK(31, 0);
		pci_read_config_dword(pcidev, dfl_res_off, &dfl_res);

		/* low bits select the BAR, remaining bits are the offset */
		bir = dfl_res & PCI_VNDR_DFLS_RES_BAR_MASK;
		if (bir >= PCI_STD_NUM_BARS) {
			dev_err(&pcidev->dev, "%s bad bir number %d\n",
				__func__, bir);
			return -EINVAL;
		}

		/* reject duplicate DFL descriptors for the same BAR */
		if (bars & BIT(bir)) {
			dev_err(&pcidev->dev, "%s DFL for BAR %d already specified\n",
				__func__, bir);
			return -EINVAL;
		}

		bars |= BIT(bir);

		len = pci_resource_len(pcidev, bir);
		offset = dfl_res & PCI_VNDR_DFLS_RES_OFF_MASK;
		/* offset must lie inside the BAR */
		if (offset >= len) {
			dev_err(&pcidev->dev, "%s bad offset %u >= %pa\n",
				__func__, offset, &len);
			return -EINVAL;
		}

		dev_dbg(&pcidev->dev, "%s BAR %d offset 0x%x\n", __func__, bir, offset);

		/* the DFL spans from 'offset' to the end of the BAR */
		len -= offset;

		start = pci_resource_start(pcidev, bir) + offset;

		dfl_fpga_enum_info_add_dfl(info, start, len);
	}

	return 0;
}
207
/*
 * find_dfls_by_default - legacy discovery: DFL starts at offset 0 of BAR 0.
 *
 * Temporarily maps BAR 0 and inspects the first feature header. A PF
 * exposes an FME (with per-port DFLs referenced from FME registers); a VF
 * exposes a single Port/AFU.
 *
 * Return: 0 on success, -ENOMEM if BAR 0 cannot be mapped, -ENODEV if
 * BAR 0 holds neither an FME nor a Port header.
 */
static int find_dfls_by_default(struct pci_dev *pcidev,
				struct dfl_fpga_enum_info *info)
{
	int port_num, bar, i, ret = 0;
	resource_size_t start, len;
	void __iomem *base;
	u32 offset;
	u64 v;

	/* start to find Device Feature List from Bar 0 */
	base = cci_pci_ioremap_bar0(pcidev);
	if (!base)
		return -ENOMEM;

	/*
	 * PF device has FME and Ports/AFUs, and VF device only has one
	 * Port/AFU. Check them and add related "Device Feature List" info
	 * for the next step enumeration.
	 */
	if (dfl_feature_is_fme(base)) {
		start = pci_resource_start(pcidev, 0);
		len = pci_resource_len(pcidev, 0);

		dfl_fpga_enum_info_add_dfl(info, start, len);

		/*
		 * find more Device Feature Lists (e.g. Ports) per information
		 * indicated by FME module.
		 */
		v = readq(base + FME_HDR_CAP);
		port_num = FIELD_GET(FME_CAP_NUM_PORTS, v);

		WARN_ON(port_num > MAX_DFL_FPGA_PORT_NUM);

		for (i = 0; i < port_num; i++) {
			v = readq(base + FME_HDR_PORT_OFST(i));

			/* skip ports which are not implemented. */
			if (!(v & FME_PORT_OFST_IMP))
				continue;

			/*
			 * add Port's Device Feature List information for next
			 * step enumeration.
			 */
			bar = FIELD_GET(FME_PORT_OFST_BAR_ID, v);
			offset = FIELD_GET(FME_PORT_OFST_DFH_OFST, v);
			start = pci_resource_start(pcidev, bar) + offset;
			len = pci_resource_len(pcidev, bar) - offset;

			dfl_fpga_enum_info_add_dfl(info, start, len);
		}
	} else if (dfl_feature_is_port(base)) {
		start = pci_resource_start(pcidev, 0);
		len = pci_resource_len(pcidev, 0);

		dfl_fpga_enum_info_add_dfl(info, start, len);
	} else {
		ret = -ENODEV;
	}

	/* release I/O mappings for next step enumeration */
	pcim_iounmap_regions(pcidev, BIT(0));

	return ret;
}
275
/*
 * cci_enumerate_feature_devs - enumerate feature devices under a pci device.
 *
 * Builds enumeration info (IRQ table plus DFL regions found via the VSEC
 * or, failing that, the legacy BAR-0 scan), then creates the container
 * device and all child feature devices. On success, the resulting cdev is
 * stored in drvdata; on any failure the allocated IRQ vectors are freed.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int cci_enumerate_feature_devs(struct pci_dev *pcidev)
{
	struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
	struct dfl_fpga_enum_info *info;
	struct dfl_fpga_cdev *cdev;
	int nvec, ret = 0;
	int *irq_table;

	/* allocate enumeration info via pci_dev */
	info = dfl_fpga_enum_info_alloc(&pcidev->dev);
	if (!info)
		return -ENOMEM;

	/* add irq info for enumeration if the device support irq */
	nvec = cci_pci_alloc_irq(pcidev);
	if (nvec < 0) {
		dev_err(&pcidev->dev, "Fail to alloc irq %d.\n", nvec);
		ret = nvec;
		goto enum_info_free_exit;
	} else if (nvec) {
		irq_table = cci_pci_create_irq_table(pcidev, nvec);
		if (!irq_table) {
			ret = -ENOMEM;
			goto irq_free_exit;
		}

		/* enum_info_add_irq copies the table, so free our copy now */
		ret = dfl_fpga_enum_info_add_irq(info, nvec, irq_table);
		kfree(irq_table);
		if (ret)
			goto irq_free_exit;
	}

	/* VSEC-described DFLs take precedence; -ENODEV means "no VSEC" */
	ret = find_dfls_by_vsec(pcidev, info);
	if (ret == -ENODEV)
		ret = find_dfls_by_default(pcidev, info);

	if (ret)
		goto irq_free_exit;

	/* start enumeration with prepared enumeration information */
	cdev = dfl_fpga_feature_devs_enumerate(info);
	if (IS_ERR(cdev)) {
		dev_err(&pcidev->dev, "Enumeration failure\n");
		ret = PTR_ERR(cdev);
		goto irq_free_exit;
	}

	drvdata->cdev = cdev;

irq_free_exit:
	if (ret)
		cci_pci_free_irq(pcidev);
enum_info_free_exit:
	dfl_fpga_enum_info_free(info);

	return ret;
}
334
335 static
336 int cci_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *pcidevid)
337 {
338 int ret;
339
340 ret = pcim_enable_device(pcidev);
341 if (ret < 0) {
342 dev_err(&pcidev->dev, "Failed to enable device %d.\n", ret);
343 return ret;
344 }
345
346 ret = pci_enable_pcie_error_reporting(pcidev);
347 if (ret && ret != -EINVAL)
348 dev_info(&pcidev->dev, "PCIE AER unavailable %d.\n", ret);
349
350 pci_set_master(pcidev);
351
352 if (!pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) {
353 ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
354 if (ret)
355 goto disable_error_report_exit;
356 } else if (!pci_set_dma_mask(pcidev, DMA_BIT_MASK(32))) {
357 ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
358 if (ret)
359 goto disable_error_report_exit;
360 } else {
361 ret = -EIO;
362 dev_err(&pcidev->dev, "No suitable DMA support available.\n");
363 goto disable_error_report_exit;
364 }
365
366 ret = cci_init_drvdata(pcidev);
367 if (ret) {
368 dev_err(&pcidev->dev, "Fail to init drvdata %d.\n", ret);
369 goto disable_error_report_exit;
370 }
371
372 ret = cci_enumerate_feature_devs(pcidev);
373 if (!ret)
374 return ret;
375
376 dev_err(&pcidev->dev, "enumeration failure %d.\n", ret);
377
378 disable_error_report_exit:
379 pci_disable_pcie_error_reporting(pcidev);
380 return ret;
381 }
382
/*
 * cci_pci_sriov_configure - sysfs sriov_numvfs hook.
 *
 * num_vfs == 0 disables SR-IOV and returns released ports to the default
 * PF access mode. A non-zero count first switches released ports to VF
 * access mode, then enables SR-IOV, rolling the ports back to PF mode if
 * pci_enable_sriov() fails.
 *
 * Return: num_vfs on success, negative errno on failure.
 */
static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs)
{
	struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
	struct dfl_fpga_cdev *cdev = drvdata->cdev;

	if (!num_vfs) {
		/*
		 * disable SRIOV and then put released ports back to default
		 * PF access mode.
		 */
		pci_disable_sriov(pcidev);

		dfl_fpga_cdev_config_ports_pf(cdev);

	} else {
		int ret;

		/*
		 * before enable SRIOV, put released ports into VF access mode
		 * first of all.
		 */
		ret = dfl_fpga_cdev_config_ports_vf(cdev, num_vfs);
		if (ret)
			return ret;

		ret = pci_enable_sriov(pcidev, num_vfs);
		if (ret) {
			/* roll ports back to PF mode on SR-IOV failure */
			dfl_fpga_cdev_config_ports_pf(cdev);
			return ret;
		}
	}

	return num_vfs;
}
417
/* PCI .remove: disable SR-IOV (PF only), remove children, disable AER. */
static void cci_pci_remove(struct pci_dev *pcidev)
{
	/* VFs must be gone before the ports can be torn down */
	if (dev_is_pf(&pcidev->dev))
		cci_pci_sriov_configure(pcidev, 0);

	cci_remove_feature_devs(pcidev);
	pci_disable_pcie_error_reporting(pcidev);
}
426
/* PCI driver registration; all callbacks are defined above. */
static struct pci_driver cci_pci_driver = {
	.name = DRV_NAME,
	.id_table = cci_pcie_id_tbl,
	.probe = cci_pci_probe,
	.remove = cci_pci_remove,
	.sriov_configure = cci_pci_sriov_configure,
};

module_pci_driver(cci_pci_driver);

MODULE_DESCRIPTION("FPGA DFL PCIe Device Driver");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");