/* (extraction artifact removed: git-blame table header) */
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ | |
3 | #include <linux/init.h> | |
4 | #include <linux/kernel.h> | |
5 | #include <linux/module.h> | |
6 | #include <linux/slab.h> | |
7 | #include <linux/pci.h> | |
8 | #include <linux/interrupt.h> | |
9 | #include <linux/delay.h> | |
10 | #include <linux/dma-mapping.h> | |
11 | #include <linux/workqueue.h> | |
12 | #include <linux/aer.h> | |
13 | #include <linux/fs.h> | |
14 | #include <linux/io-64-nonatomic-lo-hi.h> | |
15 | #include <linux/device.h> | |
16 | #include <linux/idr.h> | |
8e50d392 DJ |
17 | #include <linux/intel-svm.h> |
18 | #include <linux/iommu.h> | |
bfe1d560 | 19 | #include <uapi/linux/idxd.h> |
8f47d1a5 DJ |
20 | #include <linux/dmaengine.h> |
21 | #include "../dmaengine.h" | |
bfe1d560 DJ |
22 | #include "registers.h" |
23 | #include "idxd.h" | |
24 | ||
MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");

/*
 * Module parameter: toggle Shared Virtual Addressing (SVA) support.
 * Checked in idxd_probe(); 0644 makes it visible/writable via sysfs.
 */
static bool sva = true;
module_param(sva, bool, 0644);
MODULE_PARM_DESC(sva, "Toggle SVA support on/off");

#define DRV_NAME "idxd"

/* Set in idxd_init_module() when the CPU advertises ENQCMD. */
bool support_enqcmd;
/* Allocator for unique device instance ids; used by idxd_alloc(). */
DEFINE_IDA(idxd_ida);
/*
 * Per-device-type constants: sysfs name prefix, completion record
 * size and alignment, and the conf_dev device type. Indexed by
 * IDXD_TYPE_* and referenced from idxd_pci_tbl via driver_data.
 */
static struct idxd_driver_data idxd_driver_data[] = {
	[IDXD_TYPE_DSA] = {
		.name_prefix = "dsa",
		.type = IDXD_TYPE_DSA,
		.compl_size = sizeof(struct dsa_completion_record),
		.align = 32,
		.dev_type = &dsa_device_type,
	},
	[IDXD_TYPE_IAX] = {
		.name_prefix = "iax",
		.type = IDXD_TYPE_IAX,
		.compl_size = sizeof(struct iax_completion_record),
		.align = 64,
		.dev_type = &iax_device_type,
	},
};
/*
 * PCI id table: each entry carries a pointer to its idxd_driver_data
 * so idxd_pci_probe() can recover the device-type constants from
 * id->driver_data.
 */
static struct pci_device_id idxd_pci_tbl[] = {
	/* DSA ver 1.0 platforms */
	{ PCI_DEVICE_DATA(INTEL, DSA_SPR0, &idxd_driver_data[IDXD_TYPE_DSA]) },

	/* IAX ver 1.0 platforms */
	{ PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
bfe1d560 DJ |
65 | static int idxd_setup_interrupts(struct idxd_device *idxd) |
66 | { | |
67 | struct pci_dev *pdev = idxd->pdev; | |
68 | struct device *dev = &pdev->dev; | |
bfe1d560 DJ |
69 | struct idxd_irq_entry *irq_entry; |
70 | int i, msixcnt; | |
71 | int rc = 0; | |
72 | ||
73 | msixcnt = pci_msix_vec_count(pdev); | |
74 | if (msixcnt < 0) { | |
75 | dev_err(dev, "Not MSI-X interrupt capable.\n"); | |
5fc8e85f | 76 | return -ENOSPC; |
bfe1d560 DJ |
77 | } |
78 | ||
5fc8e85f DJ |
79 | rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX); |
80 | if (rc != msixcnt) { | |
81 | dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc); | |
82 | return -ENOSPC; | |
bfe1d560 DJ |
83 | } |
84 | dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt); | |
85 | ||
86 | /* | |
87 | * We implement 1 completion list per MSI-X entry except for | |
88 | * entry 0, which is for errors and others. | |
89 | */ | |
47c16ac2 DJ |
90 | idxd->irq_entries = kcalloc_node(msixcnt, sizeof(struct idxd_irq_entry), |
91 | GFP_KERNEL, dev_to_node(dev)); | |
bfe1d560 DJ |
92 | if (!idxd->irq_entries) { |
93 | rc = -ENOMEM; | |
5fc8e85f | 94 | goto err_irq_entries; |
bfe1d560 DJ |
95 | } |
96 | ||
97 | for (i = 0; i < msixcnt; i++) { | |
98 | idxd->irq_entries[i].id = i; | |
99 | idxd->irq_entries[i].idxd = idxd; | |
5fc8e85f | 100 | idxd->irq_entries[i].vector = pci_irq_vector(pdev, i); |
e4f4d8cd | 101 | spin_lock_init(&idxd->irq_entries[i].list_lock); |
bfe1d560 DJ |
102 | } |
103 | ||
bfe1d560 | 104 | irq_entry = &idxd->irq_entries[0]; |
a1610461 | 105 | rc = request_threaded_irq(irq_entry->vector, NULL, idxd_misc_thread, |
5fc8e85f | 106 | 0, "idxd-misc", irq_entry); |
bfe1d560 DJ |
107 | if (rc < 0) { |
108 | dev_err(dev, "Failed to allocate misc interrupt.\n"); | |
5fc8e85f | 109 | goto err_misc_irq; |
bfe1d560 DJ |
110 | } |
111 | ||
5fc8e85f | 112 | dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", irq_entry->vector); |
bfe1d560 DJ |
113 | |
114 | /* first MSI-X entry is not for wq interrupts */ | |
115 | idxd->num_wq_irqs = msixcnt - 1; | |
116 | ||
117 | for (i = 1; i < msixcnt; i++) { | |
bfe1d560 DJ |
118 | irq_entry = &idxd->irq_entries[i]; |
119 | ||
120 | init_llist_head(&idxd->irq_entries[i].pending_llist); | |
121 | INIT_LIST_HEAD(&idxd->irq_entries[i].work_list); | |
a1610461 | 122 | rc = request_threaded_irq(irq_entry->vector, NULL, |
5fc8e85f | 123 | idxd_wq_thread, 0, "idxd-portal", irq_entry); |
bfe1d560 | 124 | if (rc < 0) { |
5fc8e85f DJ |
125 | dev_err(dev, "Failed to allocate irq %d.\n", irq_entry->vector); |
126 | goto err_wq_irqs; | |
bfe1d560 | 127 | } |
eb15e715 | 128 | |
5fc8e85f | 129 | dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", i, irq_entry->vector); |
eb15e715 DJ |
130 | if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)) { |
131 | /* | |
132 | * The MSIX vector enumeration starts at 1 with vector 0 being the | |
133 | * misc interrupt that handles non I/O completion events. The | |
134 | * interrupt handles are for IMS enumeration on guest. The misc | |
135 | * interrupt vector does not require a handle and therefore we start | |
136 | * the int_handles at index 0. Since 'i' starts at 1, the first | |
137 | * int_handles index will be 0. | |
138 | */ | |
139 | rc = idxd_device_request_int_handle(idxd, i, &idxd->int_handles[i - 1], | |
140 | IDXD_IRQ_MSIX); | |
141 | if (rc < 0) { | |
142 | free_irq(irq_entry->vector, irq_entry); | |
143 | goto err_wq_irqs; | |
144 | } | |
145 | dev_dbg(dev, "int handle requested: %u\n", idxd->int_handles[i - 1]); | |
146 | } | |
bfe1d560 DJ |
147 | } |
148 | ||
149 | idxd_unmask_error_interrupts(idxd); | |
6df0e6c5 | 150 | idxd_msix_perm_setup(idxd); |
bfe1d560 DJ |
151 | return 0; |
152 | ||
5fc8e85f DJ |
153 | err_wq_irqs: |
154 | while (--i >= 0) { | |
155 | irq_entry = &idxd->irq_entries[i]; | |
156 | free_irq(irq_entry->vector, irq_entry); | |
eb15e715 DJ |
157 | if (i != 0) |
158 | idxd_device_release_int_handle(idxd, | |
159 | idxd->int_handles[i], IDXD_IRQ_MSIX); | |
5fc8e85f DJ |
160 | } |
161 | err_misc_irq: | |
bfe1d560 DJ |
162 | /* Disable error interrupt generation */ |
163 | idxd_mask_error_interrupts(idxd); | |
5fc8e85f DJ |
164 | err_irq_entries: |
165 | pci_free_irq_vectors(pdev); | |
bfe1d560 DJ |
166 | dev_err(dev, "No usable interrupts\n"); |
167 | return rc; | |
168 | } | |
169 | ||
/*
 * Allocate and initialize one struct idxd_wq per hardware work queue
 * (idxd->max_wqs total) plus the idxd->wqs pointer array.
 *
 * Each wq's conf_dev is device_initialize()d and parented to the idxd
 * conf_dev on the dsa bus, so cleanup goes through put_device() — the
 * release callback owns freeing — never a direct kfree of the wq.
 *
 * Returns 0 on success or a negative errno; on failure, previously
 * created wqs are released via put_device().
 */
static int idxd_setup_wqs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct idxd_wq *wq;
	int i, rc;

	idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
				 GFP_KERNEL, dev_to_node(dev));
	if (!idxd->wqs)
		return -ENOMEM;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
		if (!wq) {
			rc = -ENOMEM;
			goto err;
		}

		wq->id = i;
		wq->idxd = idxd;
		device_initialize(&wq->conf_dev);
		wq->conf_dev.parent = &idxd->conf_dev;
		wq->conf_dev.bus = &dsa_bus_type;
		wq->conf_dev.type = &idxd_wq_device_type;
		rc = dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
		if (rc < 0) {
			/* conf_dev is initialized: release via refcount. */
			put_device(&wq->conf_dev);
			goto err;
		}

		mutex_init(&wq->wq_lock);
		init_waitqueue_head(&wq->err_queue);
		init_completion(&wq->wq_dead);
		/* Start with the device-wide limits; sysfs may lower them. */
		wq->max_xfer_bytes = idxd->max_xfer_bytes;
		wq->max_batch_size = idxd->max_batch_size;
		wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
		if (!wq->wqcfg) {
			put_device(&wq->conf_dev);
			rc = -ENOMEM;
			goto err;
		}
		idxd->wqs[i] = wq;
	}

	return 0;

 err:
	/* Unwind only fully constructed wqs (index < i). */
	while (--i >= 0)
		put_device(&idxd->wqs[i]->conf_dev);
	return rc;
}
75b91130 DJ |
222 | static int idxd_setup_engines(struct idxd_device *idxd) |
223 | { | |
224 | struct idxd_engine *engine; | |
225 | struct device *dev = &idxd->pdev->dev; | |
226 | int i, rc; | |
227 | ||
228 | idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *), | |
229 | GFP_KERNEL, dev_to_node(dev)); | |
230 | if (!idxd->engines) | |
231 | return -ENOMEM; | |
232 | ||
233 | for (i = 0; i < idxd->max_engines; i++) { | |
234 | engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev)); | |
235 | if (!engine) { | |
236 | rc = -ENOMEM; | |
237 | goto err; | |
238 | } | |
239 | ||
240 | engine->id = i; | |
241 | engine->idxd = idxd; | |
242 | device_initialize(&engine->conf_dev); | |
243 | engine->conf_dev.parent = &idxd->conf_dev; | |
244 | engine->conf_dev.type = &idxd_engine_device_type; | |
245 | rc = dev_set_name(&engine->conf_dev, "engine%d.%d", idxd->id, engine->id); | |
246 | if (rc < 0) { | |
247 | put_device(&engine->conf_dev); | |
248 | goto err; | |
249 | } | |
250 | ||
251 | idxd->engines[i] = engine; | |
252 | } | |
253 | ||
254 | return 0; | |
255 | ||
256 | err: | |
257 | while (--i >= 0) | |
258 | put_device(&idxd->engines[i]->conf_dev); | |
259 | return rc; | |
260 | } | |
261 | ||
/*
 * Allocate and initialize one struct idxd_group per hardware group
 * (idxd->max_groups total) plus the idxd->groups pointer array.
 *
 * Groups start with traffic classes tc_a/tc_b set to -1 (unassigned).
 * Cleanup goes through put_device() on the initialized conf_dev.
 *
 * Returns 0 on success or a negative errno; on failure, previously
 * created groups are released via put_device().
 */
static int idxd_setup_groups(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct idxd_group *group;
	int i, rc;

	idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *),
				    GFP_KERNEL, dev_to_node(dev));
	if (!idxd->groups)
		return -ENOMEM;

	for (i = 0; i < idxd->max_groups; i++) {
		group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev));
		if (!group) {
			rc = -ENOMEM;
			goto err;
		}

		group->id = i;
		group->idxd = idxd;
		device_initialize(&group->conf_dev);
		group->conf_dev.parent = &idxd->conf_dev;
		group->conf_dev.bus = &dsa_bus_type;
		group->conf_dev.type = &idxd_group_device_type;
		rc = dev_set_name(&group->conf_dev, "group%d.%d", idxd->id, group->id);
		if (rc < 0) {
			/* conf_dev is initialized: release via refcount. */
			put_device(&group->conf_dev);
			goto err;
		}

		idxd->groups[i] = group;
		/* -1 == traffic class not yet assigned. */
		group->tc_a = -1;
		group->tc_b = -1;
	}

	return 0;

 err:
	/* Unwind only fully constructed groups (index < i). */
	while (--i >= 0)
		put_device(&idxd->groups[i]->conf_dev);
	return rc;
}
305 | static int idxd_setup_internals(struct idxd_device *idxd) | |
306 | { | |
307 | struct device *dev = &idxd->pdev->dev; | |
308 | int rc, i; | |
309 | ||
0d5c10b4 | 310 | init_waitqueue_head(&idxd->cmd_waitq); |
7c5dd23e | 311 | |
eb15e715 DJ |
312 | if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)) { |
313 | idxd->int_handles = devm_kcalloc(dev, idxd->max_wqs, sizeof(int), GFP_KERNEL); | |
314 | if (!idxd->int_handles) | |
315 | return -ENOMEM; | |
316 | } | |
317 | ||
7c5dd23e DJ |
318 | rc = idxd_setup_wqs(idxd); |
319 | if (rc < 0) | |
eb15e715 | 320 | goto err_wqs; |
7c5dd23e | 321 | |
75b91130 DJ |
322 | rc = idxd_setup_engines(idxd); |
323 | if (rc < 0) | |
324 | goto err_engine; | |
325 | ||
defe49f9 DJ |
326 | rc = idxd_setup_groups(idxd); |
327 | if (rc < 0) | |
328 | goto err_group; | |
bfe1d560 | 329 | |
0d5c10b4 | 330 | idxd->wq = create_workqueue(dev_name(dev)); |
7c5dd23e DJ |
331 | if (!idxd->wq) { |
332 | rc = -ENOMEM; | |
defe49f9 | 333 | goto err_wkq_create; |
7c5dd23e | 334 | } |
0d5c10b4 | 335 | |
bfe1d560 | 336 | return 0; |
7c5dd23e | 337 | |
defe49f9 DJ |
338 | err_wkq_create: |
339 | for (i = 0; i < idxd->max_groups; i++) | |
340 | put_device(&idxd->groups[i]->conf_dev); | |
341 | err_group: | |
75b91130 DJ |
342 | for (i = 0; i < idxd->max_engines; i++) |
343 | put_device(&idxd->engines[i]->conf_dev); | |
344 | err_engine: | |
7c5dd23e DJ |
345 | for (i = 0; i < idxd->max_wqs; i++) |
346 | put_device(&idxd->wqs[i]->conf_dev); | |
eb15e715 DJ |
347 | err_wqs: |
348 | kfree(idxd->int_handles); | |
7c5dd23e | 349 | return rc; |
bfe1d560 DJ |
350 | } |
351 | ||
352 | static void idxd_read_table_offsets(struct idxd_device *idxd) | |
353 | { | |
354 | union offsets_reg offsets; | |
355 | struct device *dev = &idxd->pdev->dev; | |
356 | ||
357 | offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET); | |
2f8417a9 DJ |
358 | offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64)); |
359 | idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT; | |
bfe1d560 | 360 | dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset); |
2f8417a9 DJ |
361 | idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT; |
362 | dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset); | |
363 | idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT; | |
364 | dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset); | |
365 | idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT; | |
bfe1d560 DJ |
366 | dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset); |
367 | } | |
368 | ||
/*
 * Read the device capability registers (GENCAP, CMDCAP, GRPCAP, ENGCAP,
 * WQCAP, OPCAP) over MMIO and cache the derived limits in @idxd.
 * Called once from idxd_probe() before idxd_setup_internals().
 */
static void idxd_read_caps(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	/* reading generic capabilities */
	idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
	dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);

	/* CMDCAP only exists when GENCAP advertises it. */
	if (idxd->hw.gen_cap.cmd_cap) {
		idxd->hw.cmd_cap = ioread32(idxd->reg_base + IDXD_CMDCAP_OFFSET);
		dev_dbg(dev, "cmd_cap: %#x\n", idxd->hw.cmd_cap);
	}

	/* Max transfer/batch sizes are encoded as power-of-two shifts. */
	idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
	dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
	idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
	dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
	if (idxd->hw.gen_cap.config_en)
		set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);

	/* reading group capabilities */
	idxd->hw.group_cap.bits =
		ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
	dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
	idxd->max_groups = idxd->hw.group_cap.num_groups;
	dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
	idxd->max_tokens = idxd->hw.group_cap.total_tokens;
	dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens);
	/* All tokens start out unallocated. */
	idxd->nr_tokens = idxd->max_tokens;

	/* read engine capabilities */
	idxd->hw.engine_cap.bits =
		ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
	dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
	idxd->max_engines = idxd->hw.engine_cap.num_engines;
	dev_dbg(dev, "max engines: %u\n", idxd->max_engines);

	/* read workqueue capabilities */
	idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
	dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
	idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
	dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
	idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
	dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
	/* Per-wq config size, power-of-two with IDXD_WQCFG_MIN floor. */
	idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN);
	dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size);

	/* reading operation capabilities */
	for (i = 0; i < 4; i++) {
		idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
				IDXD_OPCAP_OFFSET + i * sizeof(u64));
		dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
	}
}
435b512d | 425 | static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data) |
bfe1d560 DJ |
426 | { |
427 | struct device *dev = &pdev->dev; | |
428 | struct idxd_device *idxd; | |
47c16ac2 | 429 | int rc; |
bfe1d560 | 430 | |
47c16ac2 | 431 | idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev)); |
bfe1d560 DJ |
432 | if (!idxd) |
433 | return NULL; | |
434 | ||
435 | idxd->pdev = pdev; | |
435b512d | 436 | idxd->data = data; |
4b73e4eb | 437 | idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL); |
47c16ac2 DJ |
438 | if (idxd->id < 0) |
439 | return NULL; | |
440 | ||
441 | device_initialize(&idxd->conf_dev); | |
442 | idxd->conf_dev.parent = dev; | |
4b73e4eb | 443 | idxd->conf_dev.bus = &dsa_bus_type; |
435b512d DJ |
444 | idxd->conf_dev.type = idxd->data->dev_type; |
445 | rc = dev_set_name(&idxd->conf_dev, "%s%d", idxd->data->name_prefix, idxd->id); | |
47c16ac2 DJ |
446 | if (rc < 0) { |
447 | put_device(&idxd->conf_dev); | |
448 | return NULL; | |
449 | } | |
450 | ||
bfe1d560 | 451 | spin_lock_init(&idxd->dev_lock); |
53b2ee7f | 452 | spin_lock_init(&idxd->cmd_lock); |
bfe1d560 DJ |
453 | |
454 | return idxd; | |
455 | } | |
456 | ||
8e50d392 DJ |
457 | static int idxd_enable_system_pasid(struct idxd_device *idxd) |
458 | { | |
459 | int flags; | |
460 | unsigned int pasid; | |
461 | struct iommu_sva *sva; | |
462 | ||
463 | flags = SVM_FLAG_SUPERVISOR_MODE; | |
464 | ||
465 | sva = iommu_sva_bind_device(&idxd->pdev->dev, NULL, &flags); | |
466 | if (IS_ERR(sva)) { | |
467 | dev_warn(&idxd->pdev->dev, | |
468 | "iommu sva bind failed: %ld\n", PTR_ERR(sva)); | |
469 | return PTR_ERR(sva); | |
470 | } | |
471 | ||
472 | pasid = iommu_sva_get_pasid(sva); | |
473 | if (pasid == IOMMU_PASID_INVALID) { | |
474 | iommu_sva_unbind_device(sva); | |
475 | return -ENODEV; | |
476 | } | |
477 | ||
478 | idxd->sva = sva; | |
479 | idxd->pasid = pasid; | |
480 | dev_dbg(&idxd->pdev->dev, "system pasid: %u\n", pasid); | |
481 | return 0; | |
482 | } | |
483 | ||
484 | static void idxd_disable_system_pasid(struct idxd_device *idxd) | |
485 | { | |
486 | ||
487 | iommu_sva_unbind_device(idxd->sva); | |
488 | idxd->sva = NULL; | |
489 | } | |
490 | ||
/*
 * Device-level probe: reset the hardware, optionally enable SVA/PASID,
 * read capabilities and table offsets, build software state, load a
 * read-only config if the device is not configurable, and set up
 * interrupts. Called from idxd_pci_probe() after BAR mapping.
 *
 * Returns 0 on success or a negative errno; on failure the PASID
 * binding and the SVA IOMMU feature are torn down.
 */
static int idxd_probe(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	int rc;

	dev_dbg(dev, "%s entered and resetting device\n", __func__);
	rc = idxd_device_init_reset(idxd);
	if (rc < 0)
		return rc;

	dev_dbg(dev, "IDXD reset complete\n");

	/* SVA requires both kernel config support and the module param. */
	if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
		rc = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
		if (rc == 0) {
			rc = idxd_enable_system_pasid(idxd);
			if (rc < 0) {
				/* PASID failed: back out the IOMMU feature, run without SVA. */
				iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
				dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc);
			} else {
				set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
			}
		} else {
			dev_warn(dev, "Unable to turn on SVA feature.\n");
		}
	} else if (!sva) {
		dev_warn(dev, "User forced SVA off via module param.\n");
	}

	idxd_read_caps(idxd);
	idxd_read_table_offsets(idxd);

	rc = idxd_setup_internals(idxd);
	if (rc)
		goto err;

	/* If the configs are readonly, then load them from device */
	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
		dev_dbg(dev, "Loading RO device config\n");
		rc = idxd_device_load_config(idxd);
		if (rc < 0)
			goto err;
	}

	rc = idxd_setup_interrupts(idxd);
	if (rc)
		goto err;

	dev_dbg(dev, "IDXD interrupt setup complete.\n");

	/* Reserve the char-dev major for user portals. */
	idxd->major = idxd_cdev_get_major(idxd);

	dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
	return 0;

 err:
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	/* Safe even if the feature was never enabled on this path. */
	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
	return rc;
}
/*
 * PCI probe entry point: enable the PCI device, allocate the idxd
 * context (with the per-type driver data from id->driver_data), map
 * the MMIO BAR, set DMA masks, run device-level probe and register
 * the sysfs devices.
 *
 * Error unwinding is in strict reverse order of acquisition via the
 * err* labels; the idxd context itself is released through
 * put_device() on its conf_dev.
 */
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;
	struct idxd_driver_data *data = (struct idxd_driver_data *)id->driver_data;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	dev_dbg(dev, "Alloc IDXD context\n");
	idxd = idxd_alloc(pdev, data);
	if (!idxd) {
		rc = -ENOMEM;
		goto err_idxd_alloc;
	}

	dev_dbg(dev, "Mapping BARs\n");
	idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
	if (!idxd->reg_base) {
		rc = -ENOMEM;
		goto err_iomap;
	}

	/*
	 * Prefer 64-bit DMA, fall back to 32-bit.
	 * NOTE(review): pci_set_dma_mask()/pci_set_consistent_dma_mask()
	 * are deprecated wrappers; dma_set_mask_and_coherent() would
	 * collapse these four calls — confirm against the target kernel
	 * version before switching.
	 */
	dev_dbg(dev, "Set DMA masks\n");
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc)
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		goto err;

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc)
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		goto err;

	dev_dbg(dev, "Set PCI master\n");
	pci_set_master(pdev);
	pci_set_drvdata(pdev, idxd);

	idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
	rc = idxd_probe(idxd);
	if (rc) {
		dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
		goto err;
	}

	rc = idxd_register_devices(idxd);
	if (rc) {
		dev_err(dev, "IDXD sysfs setup failed\n");
		goto err;
	}

	idxd->state = IDXD_DEV_CONF_READY;

	dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
		 idxd->hw.version);

	return 0;

 err:
	pci_iounmap(pdev, idxd->reg_base);
 err_iomap:
	/* Releasing the conf_dev ref frees the idxd context. */
	put_device(&idxd->conf_dev);
 err_idxd_alloc:
	pci_disable_device(pdev);
	return rc;
}
8f47d1a5 DJ |
625 | static void idxd_flush_pending_llist(struct idxd_irq_entry *ie) |
626 | { | |
627 | struct idxd_desc *desc, *itr; | |
628 | struct llist_node *head; | |
629 | ||
630 | head = llist_del_all(&ie->pending_llist); | |
631 | if (!head) | |
632 | return; | |
633 | ||
634 | llist_for_each_entry_safe(desc, itr, head, llnode) { | |
635 | idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT); | |
636 | idxd_free_desc(desc->wq, desc); | |
637 | } | |
638 | } | |
639 | ||
640 | static void idxd_flush_work_list(struct idxd_irq_entry *ie) | |
641 | { | |
642 | struct idxd_desc *desc, *iter; | |
643 | ||
644 | list_for_each_entry_safe(desc, iter, &ie->work_list, list) { | |
645 | list_del(&desc->list); | |
646 | idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT); | |
647 | idxd_free_desc(desc->wq, desc); | |
648 | } | |
649 | } | |
650 | ||
5b0c68c4 DJ |
651 | void idxd_wqs_quiesce(struct idxd_device *idxd) |
652 | { | |
653 | struct idxd_wq *wq; | |
654 | int i; | |
655 | ||
656 | for (i = 0; i < idxd->max_wqs; i++) { | |
657 | wq = idxd->wqs[i]; | |
658 | if (wq->state == IDXD_WQ_ENABLED && wq->type == IDXD_WQT_KERNEL) | |
659 | idxd_wq_quiesce(wq); | |
660 | } | |
661 | } | |
662 | ||
eb15e715 DJ |
663 | static void idxd_release_int_handles(struct idxd_device *idxd) |
664 | { | |
665 | struct device *dev = &idxd->pdev->dev; | |
666 | int i, rc; | |
667 | ||
668 | for (i = 0; i < idxd->num_wq_irqs; i++) { | |
669 | if (idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)) { | |
670 | rc = idxd_device_release_int_handle(idxd, idxd->int_handles[i], | |
671 | IDXD_IRQ_MSIX); | |
672 | if (rc < 0) | |
673 | dev_warn(dev, "irq handle %d release failed\n", | |
674 | idxd->int_handles[i]); | |
675 | else | |
676 | dev_dbg(dev, "int handle requested: %u\n", idxd->int_handles[i]); | |
677 | } | |
678 | } | |
679 | } | |
680 | ||
/*
 * Shutdown path (also invoked from idxd_remove()): disable the device,
 * mask interrupts, free every MSI-X vector and flush any in-flight
 * descriptors on the wq vectors (vector 0 is misc-only and has no
 * descriptor lists), then release handles, unmap and disable the PCI
 * device and destroy the driver workqueue.
 */
static void idxd_shutdown(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	int rc, i;
	struct idxd_irq_entry *irq_entry;
	int msixcnt = pci_msix_vec_count(pdev);

	rc = idxd_device_disable(idxd);
	if (rc)
		dev_err(&pdev->dev, "Disabling device failed\n");

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	idxd_mask_msix_vectors(idxd);
	idxd_mask_error_interrupts(idxd);

	for (i = 0; i < msixcnt; i++) {
		irq_entry = &idxd->irq_entries[i];
		/* Let any in-flight handler finish before freeing the irq. */
		synchronize_irq(irq_entry->vector);
		free_irq(irq_entry->vector, irq_entry);
		if (i == 0)
			continue;
		/* Abort descriptors queued on wq completion vectors. */
		idxd_flush_pending_llist(irq_entry);
		idxd_flush_work_list(irq_entry);
	}

	idxd_msix_perm_clear(idxd);
	idxd_release_int_handles(idxd);
	pci_free_irq_vectors(pdev);
	pci_iounmap(pdev, idxd->reg_base);
	pci_disable_device(pdev);
	destroy_workqueue(idxd->wq);
}
/*
 * PCI remove: reuse the shutdown path for hardware teardown, then drop
 * the PASID binding, unregister the sysfs devices and disable the SVA
 * IOMMU feature.
 */
static void idxd_remove(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	idxd_shutdown(pdev);
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	idxd_unregister_devices(idxd);
	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
}
/* PCI driver glue: probe/remove/shutdown bound to the id table above. */
static struct pci_driver idxd_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= idxd_pci_tbl,
	.probe		= idxd_pci_probe,
	.remove		= idxd_remove,
	.shutdown	= idxd_shutdown,
};
/*
 * Module init: verify required CPU features, then register in order:
 * dsa bus type, idxd device drivers, char-dev region, PCI driver.
 * Failures unwind in reverse order via the err* labels.
 *
 * NOTE(review): idxd_register_driver() is called here but
 * idxd_exit_module() has no matching idxd_unregister_driver() —
 * verify the bus unregistration covers it.
 */
static int __init idxd_init_module(void)
{
	int err;

	/*
	 * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
	 * enumerating the device. We can not utilize it.
	 */
	if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
		pr_warn("idxd driver failed to load without MOVDIR64B.\n");
		return -ENODEV;
	}

	/* ENQCMD is optional: its absence only disables shared wq submit. */
	if (!boot_cpu_has(X86_FEATURE_ENQCMD))
		pr_warn("Platform does not have ENQCMD(S) support.\n");
	else
		support_enqcmd = true;

	err = idxd_register_bus_type();
	if (err < 0)
		return err;

	err = idxd_register_driver();
	if (err < 0)
		goto err_idxd_driver_register;

	err = idxd_cdev_register();
	if (err)
		goto err_cdev_register;

	err = pci_register_driver(&idxd_pci_driver);
	if (err)
		goto err_pci_register;

	return 0;

 err_pci_register:
	idxd_cdev_remove();
 err_cdev_register:
	idxd_unregister_driver();
 err_idxd_driver_register:
	idxd_unregister_bus_type();
	return err;
}
module_init(idxd_init_module);
/*
 * Module exit: unwind idxd_init_module() registrations in reverse
 * order (PCI driver, cdev region, bus type).
 * NOTE(review): no idxd_unregister_driver() call, though init
 * registers one — confirm this is handled by bus teardown.
 */
static void __exit idxd_exit_module(void)
{
	pci_unregister_driver(&idxd_pci_driver);
	idxd_cdev_remove();
	idxd_unregister_bus_type();
}
module_exit(idxd_exit_module);