// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"

static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
			  u32 *status);
static void idxd_device_wqs_clear_state(struct idxd_device *idxd);
static void idxd_wq_disable_cleanup(struct idxd_wq *wq);

/* Interrupt control bits */
void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
{
	struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);

	pci_msi_mask_irq(data);
}

void idxd_mask_msix_vectors(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	int msixcnt = pci_msix_vec_count(pdev);
	int i;

	for (i = 0; i < msixcnt; i++)
		idxd_mask_msix_vector(idxd, i);
}

void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
{
	struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);

	pci_msi_unmask_irq(data);
}

void idxd_unmask_error_interrupts(struct idxd_device *idxd)
{
	union genctrl_reg genctrl;

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.softerr_int_en = 1;
	genctrl.halt_int_en = 1;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}

void idxd_mask_error_interrupts(struct idxd_device *idxd)
{
	union genctrl_reg genctrl;

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.softerr_int_en = 0;
	genctrl.halt_int_en = 0;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}

static void free_hw_descs(struct idxd_wq *wq)
{
	int i;

	for (i = 0; i < wq->num_descs; i++)
		kfree(wq->hw_descs[i]);

	kfree(wq->hw_descs);
}

static int alloc_hw_descs(struct idxd_wq *wq, int num)
{
	struct device *dev = &wq->idxd->pdev->dev;
	int i;
	int node = dev_to_node(dev);

	wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
				    GFP_KERNEL, node);
	if (!wq->hw_descs)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]),
					       GFP_KERNEL, node);
		if (!wq->hw_descs[i]) {
			free_hw_descs(wq);
			return -ENOMEM;
		}
	}

	return 0;
}

static void free_descs(struct idxd_wq *wq)
{
	int i;

	for (i = 0; i < wq->num_descs; i++)
		kfree(wq->descs[i]);

	kfree(wq->descs);
}

static int alloc_descs(struct idxd_wq *wq, int num)
{
	struct device *dev = &wq->idxd->pdev->dev;
	int i;
	int node = dev_to_node(dev);

	wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *),
				 GFP_KERNEL, node);
	if (!wq->descs)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]),
					    GFP_KERNEL, node);
		if (!wq->descs[i]) {
			free_descs(wq);
			return -ENOMEM;
		}
	}

	return 0;
}

/* WQ control bits */
int idxd_wq_alloc_resources(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int rc, num_descs, i;
	int align;
	u64 tmp;

	if (wq->type != IDXD_WQT_KERNEL)
		return 0;

	num_descs = wq_dedicated(wq) ? wq->size : wq->threshold;
	wq->num_descs = num_descs;

	rc = alloc_hw_descs(wq, num_descs);
	if (rc < 0)
		return rc;

	align = idxd->data->align;
	wq->compls_size = num_descs * idxd->data->compl_size + align;
	wq->compls_raw = dma_alloc_coherent(dev, wq->compls_size,
					    &wq->compls_addr_raw, GFP_KERNEL);
	if (!wq->compls_raw) {
		rc = -ENOMEM;
		goto fail_alloc_compls;
	}

	/* Adjust alignment */
	wq->compls_addr = (wq->compls_addr_raw + (align - 1)) & ~(align - 1);
	tmp = (u64)wq->compls_raw;
	tmp = (tmp + (align - 1)) & ~(align - 1);
	wq->compls = (struct dsa_completion_record *)tmp;
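	/*
	 * Worked example of the round-up above (editor's note, values
	 * illustrative): with align == 64 and compls_addr_raw == 0x1020,
	 * adding (align - 1) gives 0x105f and masking with ~0x3f yields
	 * 0x1040, the next 64-byte boundary. The CPU virtual address gets
	 * the same adjustment so wq->compls stays in step with
	 * wq->compls_addr.
	 */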

	rc = alloc_descs(wq, num_descs);
	if (rc < 0)
		goto fail_alloc_descs;

	rc = sbitmap_queue_init_node(&wq->sbq, num_descs, -1, false, GFP_KERNEL,
				     dev_to_node(dev));
	if (rc < 0)
		goto fail_sbitmap_init;

	for (i = 0; i < num_descs; i++) {
		struct idxd_desc *desc = wq->descs[i];

		desc->hw = wq->hw_descs[i];
		if (idxd->data->type == IDXD_TYPE_DSA)
			desc->completion = &wq->compls[i];
		else if (idxd->data->type == IDXD_TYPE_IAX)
			desc->iax_completion = &wq->iax_compls[i];
		desc->compl_dma = wq->compls_addr + idxd->data->compl_size * i;
		desc->id = i;
		desc->wq = wq;
		desc->cpu = -1;
	}

	return 0;

 fail_sbitmap_init:
	free_descs(wq);
 fail_alloc_descs:
	dma_free_coherent(dev, wq->compls_size, wq->compls_raw,
			  wq->compls_addr_raw);
 fail_alloc_compls:
	free_hw_descs(wq);
	return rc;
}

void idxd_wq_free_resources(struct idxd_wq *wq)
{
	struct device *dev = &wq->idxd->pdev->dev;

	if (wq->type != IDXD_WQT_KERNEL)
		return;

	free_hw_descs(wq);
	free_descs(wq);
	dma_free_coherent(dev, wq->compls_size, wq->compls_raw,
			  wq->compls_addr_raw);
	sbitmap_queue_free(&wq->sbq);
}

int idxd_wq_enable(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (wq->state == IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d already enabled\n", wq->id);
		return -ENXIO;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);

	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
		dev_dbg(dev, "WQ enable failed: %#x\n", status);
		return -ENXIO;
	}

	wq->state = IDXD_WQ_ENABLED;
	dev_dbg(dev, "WQ %d enabled\n", wq->id);
	return 0;
}

int idxd_wq_disable(struct idxd_wq *wq, bool reset_config)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 status, operand;

	dev_dbg(dev, "Disabling WQ %d\n", wq->id);

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return 0;
	}

	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
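	/*
	 * Editor's reading of the expression above: the low 16 bits of the
	 * operand carry a one-hot mask selecting the WQ within a group of
	 * 16, and the bits above select which group of 16; e.g. wq->id ==
	 * 20 yields BIT(4) | (1 << 16). The exact field names come from
	 * the DSA spec's command register layout.
	 */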
	idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_WQ, operand, &status);

	if (status != IDXD_CMDSTS_SUCCESS) {
		dev_dbg(dev, "WQ disable failed: %#x\n", status);
		return -ENXIO;
	}

	if (reset_config)
		idxd_wq_disable_cleanup(wq);
	wq->state = IDXD_WQ_DISABLED;
	dev_dbg(dev, "WQ %d disabled\n", wq->id);
	return 0;
}

void idxd_wq_drain(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 operand;

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return;
	}

	dev_dbg(dev, "Draining WQ %d\n", wq->id);
	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
}

void idxd_wq_reset(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 operand;

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return;
	}

	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
	idxd_wq_disable_cleanup(wq);
	wq->state = IDXD_WQ_DISABLED;
}

int idxd_wq_map_portal(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	resource_size_t start;

	start = pci_resource_start(pdev, IDXD_WQ_BAR);
	start += idxd_get_wq_portal_full_offset(wq->id, IDXD_PORTAL_LIMITED);

	wq->portal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
	if (!wq->portal)
		return -ENOMEM;

	return 0;
}

void idxd_wq_unmap_portal(struct idxd_wq *wq)
{
	struct device *dev = &wq->idxd->pdev->dev;

	devm_iounmap(dev, wq->portal);
	wq->portal = NULL;
	wq->portal_offset = 0;
}

void idxd_wqs_unmap_portal(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (wq->portal)
			idxd_wq_unmap_portal(wq);
	}
}

int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
{
	struct idxd_device *idxd = wq->idxd;
	int rc;
	union wqcfg wqcfg;
	unsigned int offset;

	rc = idxd_wq_disable(wq, false);
	if (rc < 0)
		return rc;

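	/*
	 * Editor's note: pasid_en and the PASID value appear to share one
	 * 32-bit word of WQCFG (index WQCFG_PASID_IDX), so a single
	 * ioread32/iowrite32 read-modify-write under dev_lock updates both
	 * fields together.
	 */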
	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
	spin_lock(&idxd->dev_lock);
	wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
	wqcfg.pasid_en = 1;
	wqcfg.pasid = pasid;
	iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
	spin_unlock(&idxd->dev_lock);

	rc = idxd_wq_enable(wq);
	if (rc < 0)
		return rc;

	return 0;
}

int idxd_wq_disable_pasid(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	int rc;
	union wqcfg wqcfg;
	unsigned int offset;

	rc = idxd_wq_disable(wq, false);
	if (rc < 0)
		return rc;

	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
	spin_lock(&idxd->dev_lock);
	wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
	wqcfg.pasid_en = 0;
	wqcfg.pasid = 0;
	iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
	spin_unlock(&idxd->dev_lock);

	rc = idxd_wq_enable(wq);
	if (rc < 0)
		return rc;

	return 0;
}

static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;

	lockdep_assert_held(&wq->wq_lock);
	memset(wq->wqcfg, 0, idxd->wqcfg_size);
	wq->type = IDXD_WQT_NONE;
	wq->threshold = 0;
	wq->priority = 0;
	wq->ats_dis = 0;
	clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	memset(wq->name, 0, WQ_NAME_SIZE);
}

static void idxd_wq_device_reset_cleanup(struct idxd_wq *wq)
{
	lockdep_assert_held(&wq->wq_lock);

	idxd_wq_disable_cleanup(wq);
	wq->size = 0;
	wq->group = NULL;
}

static void idxd_wq_ref_release(struct percpu_ref *ref)
{
	struct idxd_wq *wq = container_of(ref, struct idxd_wq, wq_active);

	complete(&wq->wq_dead);
}

int idxd_wq_init_percpu_ref(struct idxd_wq *wq)
{
	int rc;

	memset(&wq->wq_active, 0, sizeof(wq->wq_active));
	rc = percpu_ref_init(&wq->wq_active, idxd_wq_ref_release, 0, GFP_KERNEL);
	if (rc < 0)
		return rc;
	reinit_completion(&wq->wq_dead);
	return 0;
}

void idxd_wq_quiesce(struct idxd_wq *wq)
{
	percpu_ref_kill(&wq->wq_active);
	wait_for_completion(&wq->wq_dead);
}

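/*
 * A note on the quiesce pattern above (editor's summary, not from the
 * original source): wq_active counts in-flight submissions on the wq.
 * percpu_ref_kill() switches the ref to atomic mode, making subsequent
 * live-tryget attempts by submitters fail, and drops the base reference;
 * once the last in-flight user drops its reference,
 * idxd_wq_ref_release() completes wq_dead, so wait_for_completion()
 * returns only when the wq is truly idle.
 */
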
/* Device control bits */
static inline bool idxd_is_enabled(struct idxd_device *idxd)
{
	union gensts_reg gensts;

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

	if (gensts.state == IDXD_DEVICE_STATE_ENABLED)
		return true;
	return false;
}

static inline bool idxd_device_is_halted(struct idxd_device *idxd)
{
	union gensts_reg gensts;

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

	return (gensts.state == IDXD_DEVICE_STATE_HALT);
}

/*
 * This function is only used for reset during probe and will
 * poll for completion. Once the device is set up with interrupts,
 * all commands will be done via interrupt completion.
 */
int idxd_device_init_reset(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	union idxd_command_reg cmd;

	if (idxd_device_is_halted(idxd)) {
		dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
		return -ENXIO;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = IDXD_CMD_RESET_DEVICE;
	dev_dbg(dev, "%s: sending reset for init.\n", __func__);
	spin_lock(&idxd->cmd_lock);
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
	       IDXD_CMDSTS_ACTIVE)
		cpu_relax();
	spin_unlock(&idxd->cmd_lock);
	return 0;
}

static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
			  u32 *status)
{
	union idxd_command_reg cmd;
	DECLARE_COMPLETION_ONSTACK(done);
	u32 stat;

	if (idxd_device_is_halted(idxd)) {
		dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
		if (status)
			*status = IDXD_CMDSTS_HW_ERR;
		return;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = cmd_code;
	cmd.operand = operand;
	cmd.int_req = 1;

	spin_lock(&idxd->cmd_lock);
	wait_event_lock_irq(idxd->cmd_waitq,
			    !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
			    idxd->cmd_lock);

	dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
		__func__, cmd_code, operand);

	idxd->cmd_status = 0;
	__set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
	idxd->cmd_done = &done;
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	/*
	 * After the command is submitted, release the lock and sleep until
	 * the command completes via interrupt.
	 */
	spin_unlock(&idxd->cmd_lock);
	wait_for_completion(&done);
	stat = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
	spin_lock(&idxd->cmd_lock);
	if (status)
		*status = stat;
	idxd->cmd_status = stat & GENMASK(7, 0);
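	/*
	 * Editor's note: only the low byte is cached in cmd_status as the
	 * software error code; commands that return data (e.g. Request
	 * Interrupt Handle) carry their result in the upper bits of
	 * CMDSTS, which reach the caller unmodified through *status.
	 */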

	__clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
	/* Wake up other pending commands */
	wake_up(&idxd->cmd_waitq);
	spin_unlock(&idxd->cmd_lock);
}

int idxd_device_enable(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (idxd_is_enabled(idxd)) {
		dev_dbg(dev, "Device already enabled\n");
		return -ENXIO;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_DEVICE, 0, &status);

	/* If the command is successful or if the device was enabled */
	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_DEV_ENABLED) {
		dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
		return -ENXIO;
	}

	idxd->state = IDXD_DEV_ENABLED;
	return 0;
}

int idxd_device_disable(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (!idxd_is_enabled(idxd)) {
		dev_dbg(dev, "Device is not enabled\n");
		return 0;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_DEVICE, 0, &status);

	/* If the command is successful or if the device was disabled */
	if (status != IDXD_CMDSTS_SUCCESS &&
	    !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) {
		dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
		return -ENXIO;
	}

	spin_lock(&idxd->dev_lock);
	idxd_device_clear_state(idxd);
	idxd->state = IDXD_DEV_DISABLED;
	spin_unlock(&idxd->dev_lock);
	return 0;
}

void idxd_device_reset(struct idxd_device *idxd)
{
	idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
	spin_lock(&idxd->dev_lock);
	idxd_device_clear_state(idxd);
	idxd->state = IDXD_DEV_DISABLED;
	idxd_unmask_error_interrupts(idxd);
	idxd_msix_perm_setup(idxd);
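	/*
	 * Editor's note: a device reset clears the GENCTRL error-interrupt
	 * enables and the MSIX permission table, which is presumably why
	 * both are reprogrammed here before the device is used again.
	 */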
	spin_unlock(&idxd->dev_lock);
}

void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
{
	struct device *dev = &idxd->pdev->dev;
	u32 operand;

	operand = pasid;
	dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_DRAIN_PASID, operand);
	idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_PASID, operand, NULL);
	dev_dbg(dev, "pasid %d drained\n", pasid);
}

int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle,
				   enum idxd_interrupt_type irq_type)
{
	struct device *dev = &idxd->pdev->dev;
	u32 operand, status;

	if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)))
		return -EOPNOTSUPP;

	dev_dbg(dev, "get int handle, idx %d\n", idx);

	operand = idx & GENMASK(15, 0);
	if (irq_type == IDXD_IRQ_IMS)
		operand |= CMD_INT_HANDLE_IMS;

	dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_REQUEST_INT_HANDLE, operand);

	idxd_cmd_exec(idxd, IDXD_CMD_REQUEST_INT_HANDLE, operand, &status);

	if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
		dev_dbg(dev, "request int handle failed: %#x\n", status);
		return -ENXIO;
	}

	*handle = (status >> IDXD_CMDSTS_RES_SHIFT) & GENMASK(15, 0);
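	/*
	 * Editor's note: the handle comes back in the result field of
	 * CMDSTS, above the low status byte; only its low 16 bits form a
	 * valid interrupt handle, hence the mask.
	 */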

	dev_dbg(dev, "int handle acquired: %u\n", *handle);
	return 0;
}

int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
				   enum idxd_interrupt_type irq_type)
{
	struct device *dev = &idxd->pdev->dev;
	u32 operand, status;
	union idxd_command_reg cmd;

	if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)))
		return -EOPNOTSUPP;

	dev_dbg(dev, "release int handle, handle %d\n", handle);

	memset(&cmd, 0, sizeof(cmd));
	operand = handle & GENMASK(15, 0);

	if (irq_type == IDXD_IRQ_IMS)
		operand |= CMD_INT_HANDLE_IMS;

	cmd.cmd = IDXD_CMD_RELEASE_INT_HANDLE;
	cmd.operand = operand;

	dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_RELEASE_INT_HANDLE, operand);

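	/*
	 * This command is issued by polling CMDSTS directly instead of
	 * going through idxd_cmd_exec(). A plausible reason, not stated in
	 * the original source: handle release can run late in device
	 * teardown, when the command-completion interrupt may no longer be
	 * serviceable.
	 */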
	spin_lock(&idxd->cmd_lock);
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) & IDXD_CMDSTS_ACTIVE)
		cpu_relax();
	status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
	spin_unlock(&idxd->cmd_lock);

	if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
		dev_dbg(dev, "release int handle failed: %#x\n", status);
		return -ENXIO;
	}

	dev_dbg(dev, "int handle released.\n");
	return 0;
}

/* Device configuration bits */
static void idxd_engines_clear_state(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	int i;

	lockdep_assert_held(&idxd->dev_lock);
	for (i = 0; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		engine->group = NULL;
	}
}

static void idxd_groups_clear_state(struct idxd_device *idxd)
{
	struct idxd_group *group;
	int i;

	lockdep_assert_held(&idxd->dev_lock);
	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		memset(&group->grpcfg, 0, sizeof(group->grpcfg));
		group->num_engines = 0;
		group->num_wqs = 0;
		group->use_rdbuf_limit = false;
		group->rdbufs_allowed = 0;
		group->rdbufs_reserved = 0;
		if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) {
			group->tc_a = 1;
			group->tc_b = 1;
		} else {
			group->tc_a = -1;
			group->tc_b = -1;
		}
	}
}

static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
{
	int i;

	lockdep_assert_held(&idxd->dev_lock);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (wq->state == IDXD_WQ_ENABLED) {
			idxd_wq_disable_cleanup(wq);
			idxd_wq_device_reset_cleanup(wq);
			wq->state = IDXD_WQ_DISABLED;
		}
	}
}

void idxd_device_clear_state(struct idxd_device *idxd)
{
	idxd_groups_clear_state(idxd);
	idxd_engines_clear_state(idxd);
	idxd_device_wqs_clear_state(idxd);
}

void idxd_msix_perm_setup(struct idxd_device *idxd)
{
	union msix_perm mperm;
	int i, msixcnt;

	msixcnt = pci_msix_vec_count(idxd->pdev);
	if (msixcnt < 0)
		return;

	mperm.bits = 0;
	mperm.pasid = idxd->pasid;
	mperm.pasid_en = device_pasid_enabled(idxd);
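	/*
	 * Editor's note: entry 0 of the MSIX permission table corresponds
	 * to the miscellaneous interrupt vector (command completions and
	 * errors), which carries no descriptor completions, so programming
	 * starts at entry 1. Each table entry is 8 bytes wide, hence the
	 * i * 8 offset below.
	 */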
	for (i = 1; i < msixcnt; i++)
		iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
}

void idxd_msix_perm_clear(struct idxd_device *idxd)
{
	union msix_perm mperm;
	int i, msixcnt;

	msixcnt = pci_msix_vec_count(idxd->pdev);
	if (msixcnt < 0)
		return;

	mperm.bits = 0;
	for (i = 1; i < msixcnt; i++)
		iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
}

static void idxd_group_config_write(struct idxd_group *group)
{
	struct idxd_device *idxd = group->idxd;
	struct device *dev = &idxd->pdev->dev;
	int i;
	u32 grpcfg_offset;

	dev_dbg(dev, "Writing group %d cfg registers\n", group->id);

	/* setup GRPWQCFG */
	for (i = 0; i < GRPWQCFG_STRIDES; i++) {
		grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
		iowrite64(group->grpcfg.wqs[i], idxd->reg_base + grpcfg_offset);
		dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
			group->id, i, grpcfg_offset,
			ioread64(idxd->reg_base + grpcfg_offset));
	}

	/* setup GRPENGCFG */
	grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
	iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
		grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));

	/* setup GRPFLAGS */
	grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
	iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
		group->id, grpcfg_offset,
		ioread32(idxd->reg_base + grpcfg_offset));
}

static int idxd_groups_config_write(struct idxd_device *idxd)
{
	union gencfg_reg reg;
	int i;
	struct device *dev = &idxd->pdev->dev;

	/* Setup bandwidth rdbuf limit */
	if (idxd->hw.gen_cap.config_en && idxd->rdbuf_limit) {
		reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
		reg.rdbuf_limit = idxd->rdbuf_limit;
		iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
	}

	dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET,
		ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		idxd_group_config_write(group);
	}

	return 0;
}

static bool idxd_device_pasid_priv_enabled(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;

	if (pdev->pasid_enabled && (pdev->pasid_features & PCI_PASID_CAP_PRIV))
		return true;
	return false;
}

static int idxd_wq_config_write(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 wq_offset;
	int i;

	if (!wq->group)
		return 0;

	/*
	 * Instead of memset()ing the entire shadow copy of WQCFG, copy it
	 * back from the hardware after wq reset. This preserves the sticky
	 * values that are present on some devices.
	 */
	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
		wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset);
	}

	/* byte 0-3 */
	wq->wqcfg->wq_size = wq->size;

	if (wq->size == 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_NO_SIZE;
		dev_warn(dev, "Incorrect work queue size: 0\n");
		return -EINVAL;
	}

	/* bytes 4-7 */
	wq->wqcfg->wq_thresh = wq->threshold;

	/* byte 8-11 */
	if (wq_dedicated(wq))
		wq->wqcfg->mode = 1;

	if (device_pasid_enabled(idxd)) {
		wq->wqcfg->pasid_en = 1;
		if (wq->type == IDXD_WQT_KERNEL && wq_dedicated(wq))
			wq->wqcfg->pasid = idxd->pasid;
	}

	/*
	 * The priv bit is set depending on the WQ type: priv = 1 if the WQ
	 * type is kernel, to indicate privileged access. This setting only
	 * matters for a dedicated WQ. According to the DSA spec:
	 * If the WQ is in dedicated mode, WQ PASID Enable is 1, and the
	 * Privileged Mode Enable field of the PCI Express PASID capability
	 * is 0, this field must be 0.
	 *
	 * In the case of a dedicated kernel WQ that is not able to support
	 * the PASID cap, the configuration is rejected.
	 */
	wq->wqcfg->priv = !!(wq->type == IDXD_WQT_KERNEL);
	if (wq_dedicated(wq) && wq->wqcfg->pasid_en &&
	    !idxd_device_pasid_priv_enabled(idxd) &&
	    wq->type == IDXD_WQT_KERNEL) {
		idxd->cmd_status = IDXD_SCMD_WQ_NO_PRIV;
		return -EOPNOTSUPP;
	}

	wq->wqcfg->priority = wq->priority;

	if (idxd->hw.gen_cap.block_on_fault &&
	    test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags))
		wq->wqcfg->bof = 1;

	if (idxd->hw.wq_cap.wq_ats_support)
		wq->wqcfg->wq_ats_disable = wq->ats_dis;

	/* bytes 12-15 */
	wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
	wq->wqcfg->max_batch_shift = ilog2(wq->max_batch_size);
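	/*
	 * Worked example of the shift encoding above (editor's note,
	 * values illustrative): a max_xfer_bytes of 2MB (1 << 21) is
	 * stored as max_xfer_shift = 21, and a max_batch_size of 32
	 * descriptors as max_batch_shift = 5; the hardware reconstructs
	 * each limit as 1 << shift.
	 */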

	dev_dbg(dev, "WQ %d CFGs\n", wq->id);
	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
		iowrite32(wq->wqcfg->bits[i], idxd->reg_base + wq_offset);
		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
			wq->id, i, wq_offset,
			ioread32(idxd->reg_base + wq_offset));
	}

	return 0;
}

920
921static int idxd_wqs_config_write(struct idxd_device *idxd)
922{
923 int i, rc;
924
925 for (i = 0; i < idxd->max_wqs; i++) {
7c5dd23e 926 struct idxd_wq *wq = idxd->wqs[i];
bfe1d560
DJ
927
928 rc = idxd_wq_config_write(wq);
929 if (rc < 0)
930 return rc;
931 }
932
933 return 0;
934}
935
static void idxd_group_flags_setup(struct idxd_device *idxd)
{
	int i;

	/* TC-A 0 and TC-B 1 should be defaults */
	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		if (group->tc_a == -1)
			group->tc_a = group->grpcfg.flags.tc_a = 0;
		else
			group->grpcfg.flags.tc_a = group->tc_a;
		if (group->tc_b == -1)
			group->tc_b = group->grpcfg.flags.tc_b = 1;
		else
			group->grpcfg.flags.tc_b = group->tc_b;
		group->grpcfg.flags.use_rdbuf_limit = group->use_rdbuf_limit;
		group->grpcfg.flags.rdbufs_reserved = group->rdbufs_reserved;
		if (group->rdbufs_allowed)
			group->grpcfg.flags.rdbufs_allowed = group->rdbufs_allowed;
		else
			group->grpcfg.flags.rdbufs_allowed = idxd->max_rdbufs;
	}
}

static int idxd_engines_setup(struct idxd_device *idxd)
{
	int i, engines = 0;
	struct idxd_engine *eng;
	struct idxd_group *group;

	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		group->grpcfg.engines = 0;
	}

	for (i = 0; i < idxd->max_engines; i++) {
		eng = idxd->engines[i];
		group = eng->group;

		if (!group)
			continue;

		group->grpcfg.engines |= BIT(eng->id);
		engines++;
	}

	if (!engines)
		return -EINVAL;

	return 0;
}

static int idxd_wqs_setup(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	struct idxd_group *group;
	int i, j, configured = 0;
	struct device *dev = &idxd->pdev->dev;

	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		for (j = 0; j < 4; j++)
			group->grpcfg.wqs[j] = 0;
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		group = wq->group;

		if (!wq->group)
			continue;
		if (!wq->size)
			continue;

		if (wq_shared(wq) && !device_swq_supported(idxd)) {
			idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT;
			dev_warn(dev, "No shared wq support but configured.\n");
			return -EINVAL;
		}

		group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64);
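		/*
		 * The group's WQ membership is a 256-bit bitmap stored as
		 * four u64 words; e.g. wq->id == 70 sets bit 6 of word 1
		 * (70 / 64 == 1, 70 % 64 == 6).
		 */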
		configured++;
	}

	if (configured == 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_NONE_CONFIGURED;
		return -EINVAL;
	}

	return 0;
}

int idxd_device_config(struct idxd_device *idxd)
{
	int rc;

	lockdep_assert_held(&idxd->dev_lock);
	rc = idxd_wqs_setup(idxd);
	if (rc < 0)
		return rc;

	rc = idxd_engines_setup(idxd);
	if (rc < 0)
		return rc;

	idxd_group_flags_setup(idxd);

	rc = idxd_wqs_config_write(idxd);
	if (rc < 0)
		return rc;

	rc = idxd_groups_config_write(idxd);
	if (rc < 0)
		return rc;

	return 0;
}

static int idxd_wq_load_config(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int wqcfg_offset;
	int i;

	wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, 0);
	memcpy_fromio(wq->wqcfg, idxd->reg_base + wqcfg_offset, idxd->wqcfg_size);

	wq->size = wq->wqcfg->wq_size;
	wq->threshold = wq->wqcfg->wq_thresh;
	if (wq->wqcfg->priv)
		wq->type = IDXD_WQT_KERNEL;

	/* The driver does not support shared WQ mode in read-only config yet */
	if (wq->wqcfg->mode == 0 || wq->wqcfg->pasid_en)
		return -EOPNOTSUPP;

	set_bit(WQ_FLAG_DEDICATED, &wq->flags);

	wq->priority = wq->wqcfg->priority;

	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i);
		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", wq->id, i, wqcfg_offset, wq->wqcfg->bits[i]);
	}

	return 0;
}

static void idxd_group_load_config(struct idxd_group *group)
{
	struct idxd_device *idxd = group->idxd;
	struct device *dev = &idxd->pdev->dev;
	int i, j, grpcfg_offset;

	/*
	 * Load the WQS bit fields, iterating through all 256 bits
	 * 64 bits at a time.
	 */
	for (i = 0; i < GRPWQCFG_STRIDES; i++) {
		struct idxd_wq *wq;

		grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
		group->grpcfg.wqs[i] = ioread64(idxd->reg_base + grpcfg_offset);
		dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
			group->id, i, grpcfg_offset, group->grpcfg.wqs[i]);

		if (i * 64 >= idxd->max_wqs)
			break;

		/* Iterate through all 64 bits and check for wq set */
		for (j = 0; j < 64; j++) {
			int id = i * 64 + j;

			/* No need to check beyond max wqs */
			if (id >= idxd->max_wqs)
				break;

			/* Set group assignment for wq if wq bit is set */
			if (group->grpcfg.wqs[i] & BIT(j)) {
				wq = idxd->wqs[id];
				wq->group = group;
			}
		}
	}

	grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
	group->grpcfg.engines = ioread64(idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
		grpcfg_offset, group->grpcfg.engines);

	/* Iterate through all 64 bits to check engines set */
	for (i = 0; i < 64; i++) {
		if (i >= idxd->max_engines)
			break;

		if (group->grpcfg.engines & BIT(i)) {
			struct idxd_engine *engine = idxd->engines[i];

			engine->group = group;
		}
	}

	grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
	group->grpcfg.flags.bits = ioread32(idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
		group->id, grpcfg_offset, group->grpcfg.flags.bits);
}

int idxd_device_load_config(struct idxd_device *idxd)
{
	union gencfg_reg reg;
	int i, rc;

	reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
	idxd->rdbuf_limit = reg.rdbuf_limit;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		idxd_group_load_config(group);
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		rc = idxd_wq_load_config(wq);
		if (rc < 0)
			return rc;
	}

	return 0;
}

int __drv_enable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int rc = -ENXIO;

	lockdep_assert_held(&wq->wq_lock);

	if (idxd->state != IDXD_DEV_ENABLED) {
		idxd->cmd_status = IDXD_SCMD_DEV_NOT_ENABLED;
		goto err;
	}

	if (wq->state != IDXD_WQ_DISABLED) {
		dev_dbg(dev, "wq %d already enabled.\n", wq->id);
		idxd->cmd_status = IDXD_SCMD_WQ_ENABLED;
		rc = -EBUSY;
		goto err;
	}

	if (!wq->group) {
		dev_dbg(dev, "wq %d not attached to group.\n", wq->id);
		idxd->cmd_status = IDXD_SCMD_WQ_NO_GRP;
		goto err;
	}

	if (strlen(wq->name) == 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_NO_NAME;
		dev_dbg(dev, "wq %d name not set.\n", wq->id);
		goto err;
	}

	/* Shared WQ checks */
	if (wq_shared(wq)) {
		if (!device_swq_supported(idxd)) {
			idxd->cmd_status = IDXD_SCMD_WQ_NO_SVM;
			dev_dbg(dev, "PASID not enabled and shared wq.\n");
			goto err;
		}
		/*
		 * A shared wq with the threshold set to 0 means the user
		 * did not set the threshold or transitioned from a
		 * dedicated wq without setting one. A value of 0 would
		 * effectively disable the shared wq, so the driver does
		 * not allow 0 to be set for the threshold via sysfs.
		 */
		if (wq->threshold == 0) {
			idxd->cmd_status = IDXD_SCMD_WQ_NO_THRESH;
			dev_dbg(dev, "Shared wq and threshold 0.\n");
			goto err;
		}
	}

	rc = 0;
	spin_lock(&idxd->dev_lock);
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		rc = idxd_device_config(idxd);
	spin_unlock(&idxd->dev_lock);
	if (rc < 0) {
		dev_dbg(dev, "Writing wq %d config failed: %d\n", wq->id, rc);
		goto err;
	}

	rc = idxd_wq_enable(wq);
	if (rc < 0) {
		dev_dbg(dev, "wq %d enabling failed: %d\n", wq->id, rc);
		goto err;
	}

	rc = idxd_wq_map_portal(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_PORTAL_ERR;
		dev_dbg(dev, "wq %d portal mapping failed: %d\n", wq->id, rc);
		goto err_map_portal;
	}

	wq->client_count = 0;
	return 0;

err_map_portal:
	rc = idxd_wq_disable(wq, false);
	if (rc < 0)
		dev_dbg(dev, "wq %s disable failed\n", dev_name(wq_confdev(wq)));
err:
	return rc;
}

int drv_enable_wq(struct idxd_wq *wq)
{
	int rc;

	mutex_lock(&wq->wq_lock);
	rc = __drv_enable_wq(wq);
	mutex_unlock(&wq->wq_lock);
	return rc;
}

void __drv_disable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;

	lockdep_assert_held(&wq->wq_lock);

	if (idxd_wq_refcount(wq))
		dev_warn(dev, "Clients have claim on wq %d: %d\n",
			 wq->id, idxd_wq_refcount(wq));

	idxd_wq_unmap_portal(wq);

	idxd_wq_drain(wq);
	idxd_wq_reset(wq);

	wq->client_count = 0;
}

void drv_disable_wq(struct idxd_wq *wq)
{
	mutex_lock(&wq->wq_lock);
	__drv_disable_wq(wq);
	mutex_unlock(&wq->wq_lock);
}

int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
{
	struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
	int rc = 0;

	/*
	 * The device should be in the disabled state for the idxd_drv to
	 * load. If it is in the enabled state, the device was altered
	 * outside of the driver's control. If it is in the halted state,
	 * we don't want to proceed.
	 */
	if (idxd->state != IDXD_DEV_DISABLED) {
		idxd->cmd_status = IDXD_SCMD_DEV_ENABLED;
		return -ENXIO;
	}

	/* Device configuration */
	spin_lock(&idxd->dev_lock);
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		rc = idxd_device_config(idxd);
	spin_unlock(&idxd->dev_lock);
	if (rc < 0)
		return -ENXIO;

	/* Start device */
	rc = idxd_device_enable(idxd);
	if (rc < 0)
		return rc;

	/* Setup DMA device without channels */
	rc = idxd_register_dma_device(idxd);
	if (rc < 0) {
		idxd_device_disable(idxd);
		idxd->cmd_status = IDXD_SCMD_DEV_DMA_ERR;
		return rc;
	}

	idxd->cmd_status = 0;
	return 0;
}

void idxd_device_drv_remove(struct idxd_dev *idxd_dev)
{
	struct device *dev = &idxd_dev->conf_dev;
	struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];
		struct device *wq_dev = wq_confdev(wq);

		if (wq->state == IDXD_WQ_DISABLED)
			continue;
		dev_warn(dev, "Active wq %d on disable %s.\n", i, dev_name(wq_dev));
		device_release_driver(wq_dev);
	}

	idxd_unregister_dma_device(idxd);
	idxd_device_disable(idxd);
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		idxd_device_reset(idxd);
}

static enum idxd_dev_type dev_types[] = {
	IDXD_DEV_DSA,
	IDXD_DEV_IAX,
	IDXD_DEV_NONE,
};

struct idxd_device_driver idxd_drv = {
	.type = dev_types,
	.probe = idxd_device_drv_probe,
	.remove = idxd_device_drv_remove,
	.name = "idxd",
};
EXPORT_SYMBOL_GPL(idxd_drv);