// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"

static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
                          u32 *status);
static void idxd_device_wqs_clear_state(struct idxd_device *idxd);
static void idxd_wq_disable_cleanup(struct idxd_wq *wq);

/* Interrupt control bits */
void idxd_unmask_error_interrupts(struct idxd_device *idxd)
{
        union genctrl_reg genctrl;

        genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
        genctrl.softerr_int_en = 1;
        genctrl.halt_int_en = 1;
        iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}

void idxd_mask_error_interrupts(struct idxd_device *idxd)
{
        union genctrl_reg genctrl;

        genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
        genctrl.softerr_int_en = 0;
        genctrl.halt_int_en = 0;
        iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}
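
/*
 * Both helpers are read-modify-write on GENCTRL: the register is read back
 * first so that only softerr_int_en and halt_int_en change while any other
 * control bits keep their current values.
 */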

static void free_hw_descs(struct idxd_wq *wq)
{
        int i;

        for (i = 0; i < wq->num_descs; i++)
                kfree(wq->hw_descs[i]);

        kfree(wq->hw_descs);
}

static int alloc_hw_descs(struct idxd_wq *wq, int num)
{
        struct device *dev = &wq->idxd->pdev->dev;
        int i;
        int node = dev_to_node(dev);

        wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
                                    GFP_KERNEL, node);
        if (!wq->hw_descs)
                return -ENOMEM;

        for (i = 0; i < num; i++) {
                wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]),
                                               GFP_KERNEL, node);
                if (!wq->hw_descs[i]) {
                        free_hw_descs(wq);
                        return -ENOMEM;
                }
        }

        return 0;
}

static void free_descs(struct idxd_wq *wq)
{
        int i;

        for (i = 0; i < wq->num_descs; i++)
                kfree(wq->descs[i]);

        kfree(wq->descs);
}

static int alloc_descs(struct idxd_wq *wq, int num)
{
        struct device *dev = &wq->idxd->pdev->dev;
        int i;
        int node = dev_to_node(dev);

        wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *),
                                 GFP_KERNEL, node);
        if (!wq->descs)
                return -ENOMEM;

        for (i = 0; i < num; i++) {
                wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]),
                                            GFP_KERNEL, node);
                if (!wq->descs[i]) {
                        free_descs(wq);
                        return -ENOMEM;
                }
        }

        return 0;
}

/* WQ control bits */
int idxd_wq_alloc_resources(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        int rc, num_descs, i;

        if (wq->type != IDXD_WQT_KERNEL)
                return 0;

        num_descs = wq_dedicated(wq) ? wq->size : wq->threshold;
        wq->num_descs = num_descs;

        rc = alloc_hw_descs(wq, num_descs);
        if (rc < 0)
                return rc;

        wq->compls_size = num_descs * idxd->data->compl_size;
        wq->compls = dma_alloc_coherent(dev, wq->compls_size, &wq->compls_addr, GFP_KERNEL);
        if (!wq->compls) {
                rc = -ENOMEM;
                goto fail_alloc_compls;
        }

        rc = alloc_descs(wq, num_descs);
        if (rc < 0)
                goto fail_alloc_descs;

        rc = sbitmap_queue_init_node(&wq->sbq, num_descs, -1, false, GFP_KERNEL,
                                     dev_to_node(dev));
        if (rc < 0)
                goto fail_sbitmap_init;

        for (i = 0; i < num_descs; i++) {
                struct idxd_desc *desc = wq->descs[i];

                desc->hw = wq->hw_descs[i];
                if (idxd->data->type == IDXD_TYPE_DSA)
                        desc->completion = &wq->compls[i];
                else if (idxd->data->type == IDXD_TYPE_IAX)
                        desc->iax_completion = &wq->iax_compls[i];
                desc->compl_dma = wq->compls_addr + idxd->data->compl_size * i;
                desc->id = i;
                desc->wq = wq;
                desc->cpu = -1;
        }

        return 0;

 fail_sbitmap_init:
        free_descs(wq);
 fail_alloc_descs:
        dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
 fail_alloc_compls:
        free_hw_descs(wq);
        return rc;
}
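
/*
 * Layout sketch (illustration only): completion records come from a single
 * coherent allocation, so descriptor i's record sits at
 * wq->compls_addr + i * idxd->data->compl_size. Assuming the 32-byte DSA
 * completion record, descriptor 3's completion would live at
 * wq->compls_addr + 96.
 */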

void idxd_wq_free_resources(struct idxd_wq *wq)
{
        struct device *dev = &wq->idxd->pdev->dev;

        if (wq->type != IDXD_WQT_KERNEL)
                return;

        free_hw_descs(wq);
        free_descs(wq);
        dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
        sbitmap_queue_free(&wq->sbq);
}

int idxd_wq_enable(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        u32 status;

        if (wq->state == IDXD_WQ_ENABLED) {
                dev_dbg(dev, "WQ %d already enabled\n", wq->id);
                return 0;
        }

        idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);

        if (status != IDXD_CMDSTS_SUCCESS &&
            status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
                dev_dbg(dev, "WQ enable failed: %#x\n", status);
                return -ENXIO;
        }

        wq->state = IDXD_WQ_ENABLED;
        dev_dbg(dev, "WQ %d enabled\n", wq->id);
        return 0;
}

int idxd_wq_disable(struct idxd_wq *wq, bool reset_config)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        u32 status, operand;

        dev_dbg(dev, "Disabling WQ %d\n", wq->id);

        if (wq->state != IDXD_WQ_ENABLED) {
                dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
                return 0;
        }

        operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
        idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_WQ, operand, &status);

        if (status != IDXD_CMDSTS_SUCCESS) {
                dev_dbg(dev, "WQ disable failed: %#x\n", status);
                return -ENXIO;
        }

        if (reset_config)
                idxd_wq_disable_cleanup(wq);
        wq->state = IDXD_WQ_DISABLED;
        dev_dbg(dev, "WQ %d disabled\n", wq->id);
        return 0;
}
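
/*
 * The WQ commands above and below encode the target WQ in the operand as a
 * bit within a 16-bit mask plus a mask index in bits 31:16. For example,
 * wq->id == 21 gives BIT(21 % 16) | ((21 / 16) << 16), i.e.
 * BIT(5) | (1 << 16) == 0x10020.
 */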

void idxd_wq_drain(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        u32 operand;

        if (wq->state != IDXD_WQ_ENABLED) {
                dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
                return;
        }

        dev_dbg(dev, "Draining WQ %d\n", wq->id);
        operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
        idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
}

void idxd_wq_reset(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        u32 operand;

        if (wq->state != IDXD_WQ_ENABLED) {
                dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
                return;
        }

        operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
        idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
        idxd_wq_disable_cleanup(wq);
        wq->state = IDXD_WQ_DISABLED;
}

int idxd_wq_map_portal(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct pci_dev *pdev = idxd->pdev;
        struct device *dev = &pdev->dev;
        resource_size_t start;

        start = pci_resource_start(pdev, IDXD_WQ_BAR);
        start += idxd_get_wq_portal_full_offset(wq->id, IDXD_PORTAL_LIMITED);

        wq->portal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
        if (!wq->portal)
                return -ENOMEM;

        return 0;
}

void idxd_wq_unmap_portal(struct idxd_wq *wq)
{
        struct device *dev = &wq->idxd->pdev->dev;

        devm_iounmap(dev, wq->portal);
        wq->portal = NULL;
        wq->portal_offset = 0;
}

void idxd_wqs_unmap_portal(struct idxd_device *idxd)
{
        int i;

        for (i = 0; i < idxd->max_wqs; i++) {
                struct idxd_wq *wq = idxd->wqs[i];

                if (wq->portal)
                        idxd_wq_unmap_portal(wq);
        }
}

static void __idxd_wq_set_priv_locked(struct idxd_wq *wq, int priv)
{
        struct idxd_device *idxd = wq->idxd;
        union wqcfg wqcfg;
        unsigned int offset;

        offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PRIVL_IDX);
        spin_lock(&idxd->dev_lock);
        wqcfg.bits[WQCFG_PRIVL_IDX] = ioread32(idxd->reg_base + offset);
        wqcfg.priv = priv;
        wq->wqcfg->bits[WQCFG_PRIVL_IDX] = wqcfg.bits[WQCFG_PRIVL_IDX];
        iowrite32(wqcfg.bits[WQCFG_PRIVL_IDX], idxd->reg_base + offset);
        spin_unlock(&idxd->dev_lock);
}

static void __idxd_wq_set_pasid_locked(struct idxd_wq *wq, int pasid)
{
        struct idxd_device *idxd = wq->idxd;
        union wqcfg wqcfg;
        unsigned int offset;

        offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
        spin_lock(&idxd->dev_lock);
        wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
        wqcfg.pasid_en = 1;
        wqcfg.pasid = pasid;
        wq->wqcfg->bits[WQCFG_PASID_IDX] = wqcfg.bits[WQCFG_PASID_IDX];
        iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
        spin_unlock(&idxd->dev_lock);
}

int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
{
        int rc;

        rc = idxd_wq_disable(wq, false);
        if (rc < 0)
                return rc;

        __idxd_wq_set_pasid_locked(wq, pasid);

        rc = idxd_wq_enable(wq);
        if (rc < 0)
                return rc;

        return 0;
}

int idxd_wq_disable_pasid(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        int rc;
        union wqcfg wqcfg;
        unsigned int offset;

        rc = idxd_wq_disable(wq, false);
        if (rc < 0)
                return rc;

        offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
        spin_lock(&idxd->dev_lock);
        wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
        wqcfg.pasid_en = 0;
        wqcfg.pasid = 0;
        iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
        spin_unlock(&idxd->dev_lock);

        rc = idxd_wq_enable(wq);
        if (rc < 0)
                return rc;

        return 0;
}

static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;

        lockdep_assert_held(&wq->wq_lock);
        memset(wq->wqcfg, 0, idxd->wqcfg_size);
        wq->type = IDXD_WQT_NONE;
        wq->threshold = 0;
        wq->priority = 0;
        wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
        clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
        clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
        memset(wq->name, 0, WQ_NAME_SIZE);
        wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
        wq->max_batch_size = WQ_DEFAULT_MAX_BATCH;
}

static void idxd_wq_device_reset_cleanup(struct idxd_wq *wq)
{
        lockdep_assert_held(&wq->wq_lock);

        wq->size = 0;
        wq->group = NULL;
}

static void idxd_wq_ref_release(struct percpu_ref *ref)
{
        struct idxd_wq *wq = container_of(ref, struct idxd_wq, wq_active);

        complete(&wq->wq_dead);
}

int idxd_wq_init_percpu_ref(struct idxd_wq *wq)
{
        int rc;

        memset(&wq->wq_active, 0, sizeof(wq->wq_active));
        rc = percpu_ref_init(&wq->wq_active, idxd_wq_ref_release,
                             PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
        if (rc < 0)
                return rc;
        reinit_completion(&wq->wq_dead);
        reinit_completion(&wq->wq_resurrect);
        return 0;
}
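
/*
 * Quiescing builds on the percpu_ref set up above: killing wq_active drops
 * the initial reference, and once all outstanding submission references are
 * released, idxd_wq_ref_release() completes wq_dead so the waiter can
 * proceed.
 */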
void __idxd_wq_quiesce(struct idxd_wq *wq)
{
        lockdep_assert_held(&wq->wq_lock);
        reinit_completion(&wq->wq_resurrect);
        percpu_ref_kill(&wq->wq_active);
        complete_all(&wq->wq_resurrect);
        wait_for_completion(&wq->wq_dead);
}

void idxd_wq_quiesce(struct idxd_wq *wq)
{
        mutex_lock(&wq->wq_lock);
        __idxd_wq_quiesce(wq);
        mutex_unlock(&wq->wq_lock);
}

/* Device control bits */
static inline bool idxd_is_enabled(struct idxd_device *idxd)
{
        union gensts_reg gensts;

        gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

        if (gensts.state == IDXD_DEVICE_STATE_ENABLED)
                return true;
        return false;
}

static inline bool idxd_device_is_halted(struct idxd_device *idxd)
{
        union gensts_reg gensts;

        gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

        return (gensts.state == IDXD_DEVICE_STATE_HALT);
}

/*
 * This function is only used for reset during probe and will
 * poll for completion. Once the device is set up with interrupts,
 * all commands will be done via interrupt completion.
 */
int idxd_device_init_reset(struct idxd_device *idxd)
{
        struct device *dev = &idxd->pdev->dev;
        union idxd_command_reg cmd;

        if (idxd_device_is_halted(idxd)) {
                dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
                return -ENXIO;
        }

        memset(&cmd, 0, sizeof(cmd));
        cmd.cmd = IDXD_CMD_RESET_DEVICE;
        dev_dbg(dev, "%s: sending reset for init.\n", __func__);
        spin_lock(&idxd->cmd_lock);
        iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

        while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
               IDXD_CMDSTS_ACTIVE)
                cpu_relax();
        spin_unlock(&idxd->cmd_lock);
        return 0;
}

static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
                          u32 *status)
{
        union idxd_command_reg cmd;
        DECLARE_COMPLETION_ONSTACK(done);
        u32 stat;

        if (idxd_device_is_halted(idxd)) {
                dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
                if (status)
                        *status = IDXD_CMDSTS_HW_ERR;
                return;
        }

        memset(&cmd, 0, sizeof(cmd));
        cmd.cmd = cmd_code;
        cmd.operand = operand;
        cmd.int_req = 1;

        spin_lock(&idxd->cmd_lock);
        wait_event_lock_irq(idxd->cmd_waitq,
                            !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
                            idxd->cmd_lock);

        dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
                __func__, cmd_code, operand);

        idxd->cmd_status = 0;
        __set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
        idxd->cmd_done = &done;
        iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

        /*
         * After command submitted, release lock and go to sleep until
         * the command completes via interrupt.
         */
        spin_unlock(&idxd->cmd_lock);
        wait_for_completion(&done);
        stat = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
        spin_lock(&idxd->cmd_lock);
        if (status)
                *status = stat;
        idxd->cmd_status = stat & GENMASK(7, 0);

        __clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
        /* Wake up other pending commands */
        wake_up(&idxd->cmd_waitq);
        spin_unlock(&idxd->cmd_lock);
}
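
/*
 * Only one administrative command can be outstanding at a time:
 * IDXD_FLAG_CMD_RUNNING serializes submitters on cmd_waitq, and the
 * interrupt handler completes idxd->cmd_done once CMDSTS reports the
 * command finished.
 */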

int idxd_device_enable(struct idxd_device *idxd)
{
        struct device *dev = &idxd->pdev->dev;
        u32 status;

        if (idxd_is_enabled(idxd)) {
                dev_dbg(dev, "Device already enabled\n");
                return -ENXIO;
        }

        idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_DEVICE, 0, &status);

        /* If the command is successful or if the device was enabled */
        if (status != IDXD_CMDSTS_SUCCESS &&
            status != IDXD_CMDSTS_ERR_DEV_ENABLED) {
                dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
                return -ENXIO;
        }

        idxd->state = IDXD_DEV_ENABLED;
        return 0;
}

int idxd_device_disable(struct idxd_device *idxd)
{
        struct device *dev = &idxd->pdev->dev;
        u32 status;

        if (!idxd_is_enabled(idxd)) {
                dev_dbg(dev, "Device is not enabled\n");
                return 0;
        }

        idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_DEVICE, 0, &status);

        /* If the command is successful or if the device was disabled */
        if (status != IDXD_CMDSTS_SUCCESS &&
            !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) {
                dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
                return -ENXIO;
        }

        idxd_device_clear_state(idxd);
        return 0;
}

void idxd_device_reset(struct idxd_device *idxd)
{
        idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
        idxd_device_clear_state(idxd);
        spin_lock(&idxd->dev_lock);
        idxd_unmask_error_interrupts(idxd);
        spin_unlock(&idxd->dev_lock);
}

void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
{
        struct device *dev = &idxd->pdev->dev;
        u32 operand;

        operand = pasid;
        dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_DRAIN_PASID, operand);
        idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_PASID, operand, NULL);
        dev_dbg(dev, "pasid %d drained\n", pasid);
}

int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle,
                                   enum idxd_interrupt_type irq_type)
{
        struct device *dev = &idxd->pdev->dev;
        u32 operand, status;

        if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)))
                return -EOPNOTSUPP;

        dev_dbg(dev, "get int handle, idx %d\n", idx);

        operand = idx & GENMASK(15, 0);
        if (irq_type == IDXD_IRQ_IMS)
                operand |= CMD_INT_HANDLE_IMS;

        dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_REQUEST_INT_HANDLE, operand);

        idxd_cmd_exec(idxd, IDXD_CMD_REQUEST_INT_HANDLE, operand, &status);

        if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
                dev_dbg(dev, "request int handle failed: %#x\n", status);
                return -ENXIO;
        }

        *handle = (status >> IDXD_CMDSTS_RES_SHIFT) & GENMASK(15, 0);

        dev_dbg(dev, "int handle acquired: %u\n", *handle);
        return 0;
}

int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
                                   enum idxd_interrupt_type irq_type)
{
        struct device *dev = &idxd->pdev->dev;
        u32 operand, status;
        union idxd_command_reg cmd;

        if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)))
                return -EOPNOTSUPP;

        dev_dbg(dev, "release int handle, handle %d\n", handle);

        memset(&cmd, 0, sizeof(cmd));
        operand = handle & GENMASK(15, 0);

        if (irq_type == IDXD_IRQ_IMS)
                operand |= CMD_INT_HANDLE_IMS;

        cmd.cmd = IDXD_CMD_RELEASE_INT_HANDLE;
        cmd.operand = operand;

        dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_RELEASE_INT_HANDLE, operand);

        spin_lock(&idxd->cmd_lock);
        iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

        while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) & IDXD_CMDSTS_ACTIVE)
                cpu_relax();
        status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
        spin_unlock(&idxd->cmd_lock);

        if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
                dev_dbg(dev, "release int handle failed: %#x\n", status);
                return -ENXIO;
        }

        dev_dbg(dev, "int handle released.\n");
        return 0;
}

/* Device configuration bits */
static void idxd_engines_clear_state(struct idxd_device *idxd)
{
        struct idxd_engine *engine;
        int i;

        lockdep_assert_held(&idxd->dev_lock);
        for (i = 0; i < idxd->max_engines; i++) {
                engine = idxd->engines[i];
                engine->group = NULL;
        }
}

static void idxd_groups_clear_state(struct idxd_device *idxd)
{
        struct idxd_group *group;
        int i;

        lockdep_assert_held(&idxd->dev_lock);
        for (i = 0; i < idxd->max_groups; i++) {
                group = idxd->groups[i];
                memset(&group->grpcfg, 0, sizeof(group->grpcfg));
                group->num_engines = 0;
                group->num_wqs = 0;
                group->use_rdbuf_limit = false;
                group->rdbufs_allowed = 0;
                group->rdbufs_reserved = 0;
                if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) {
                        group->tc_a = 1;
                        group->tc_b = 1;
                } else {
                        group->tc_a = -1;
                        group->tc_b = -1;
                }
        }
}

static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
{
        int i;

        for (i = 0; i < idxd->max_wqs; i++) {
                struct idxd_wq *wq = idxd->wqs[i];

                mutex_lock(&wq->wq_lock);
                if (wq->state == IDXD_WQ_ENABLED) {
                        idxd_wq_disable_cleanup(wq);
                        wq->state = IDXD_WQ_DISABLED;
                }
                idxd_wq_device_reset_cleanup(wq);
                mutex_unlock(&wq->wq_lock);
        }
}

void idxd_device_clear_state(struct idxd_device *idxd)
{
        if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
                return;

        idxd_device_wqs_clear_state(idxd);
        spin_lock(&idxd->dev_lock);
        idxd_groups_clear_state(idxd);
        idxd_engines_clear_state(idxd);
        idxd->state = IDXD_DEV_DISABLED;
        spin_unlock(&idxd->dev_lock);
}

static void idxd_group_config_write(struct idxd_group *group)
{
        struct idxd_device *idxd = group->idxd;
        struct device *dev = &idxd->pdev->dev;
        int i;
        u32 grpcfg_offset;

        dev_dbg(dev, "Writing group %d cfg registers\n", group->id);

        /* setup GRPWQCFG */
        for (i = 0; i < GRPWQCFG_STRIDES; i++) {
                grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
                iowrite64(group->grpcfg.wqs[i], idxd->reg_base + grpcfg_offset);
                dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
                        group->id, i, grpcfg_offset,
                        ioread64(idxd->reg_base + grpcfg_offset));
        }

        /* setup GRPENGCFG */
        grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
        iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
        dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
                grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));

        /* setup GRPFLAGS */
        grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
        iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
        dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
                group->id, grpcfg_offset,
                ioread32(idxd->reg_base + grpcfg_offset));
}

static int idxd_groups_config_write(struct idxd_device *idxd)
{
        union gencfg_reg reg;
        int i;
        struct device *dev = &idxd->pdev->dev;

        /* Setup bandwidth rdbuf limit */
        if (idxd->hw.gen_cap.config_en && idxd->rdbuf_limit) {
                reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
                reg.rdbuf_limit = idxd->rdbuf_limit;
                iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
        }

        dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET,
                ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));

        for (i = 0; i < idxd->max_groups; i++) {
                struct idxd_group *group = idxd->groups[i];

                idxd_group_config_write(group);
        }

        return 0;
}

static bool idxd_device_pasid_priv_enabled(struct idxd_device *idxd)
{
        struct pci_dev *pdev = idxd->pdev;

        if (pdev->pasid_enabled && (pdev->pasid_features & PCI_PASID_CAP_PRIV))
                return true;
        return false;
}

static int idxd_wq_config_write(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        u32 wq_offset;
        int i;

        if (!wq->group)
                return 0;

        /*
         * Instead of memset the entire shadow copy of WQCFG, copy from the hardware after
         * wq reset. This will copy back the sticky values that are present on some devices.
         */
        for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
                wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
                wq->wqcfg->bits[i] |= ioread32(idxd->reg_base + wq_offset);
        }

        if (wq->size == 0 && wq->type != IDXD_WQT_NONE)
                wq->size = WQ_DEFAULT_QUEUE_DEPTH;

        /* bytes 0-3 */
        wq->wqcfg->wq_size = wq->size;

        /* bytes 4-7 */
        wq->wqcfg->wq_thresh = wq->threshold;

        /* bytes 8-11 */
        if (wq_dedicated(wq))
                wq->wqcfg->mode = 1;

        /*
         * The WQ priv bit is set depending on the WQ type. priv = 1 if the
         * WQ type is kernel to indicate privileged access. This setting only
         * matters for dedicated WQ. According to the DSA spec:
         * If the WQ is in dedicated mode, WQ PASID Enable is 1, and the
         * Privileged Mode Enable field of the PCI Express PASID capability
         * is 0, this field must be 0.
         *
         * In the case of a dedicated kernel WQ that is not able to support
         * the PASID cap, then the configuration will be rejected.
         */
        if (wq_dedicated(wq) && wq->wqcfg->pasid_en &&
            !idxd_device_pasid_priv_enabled(idxd) &&
            wq->type == IDXD_WQT_KERNEL) {
                idxd->cmd_status = IDXD_SCMD_WQ_NO_PRIV;
                return -EOPNOTSUPP;
        }

        wq->wqcfg->priority = wq->priority;

        if (idxd->hw.gen_cap.block_on_fault &&
            test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags))
                wq->wqcfg->bof = 1;

        if (idxd->hw.wq_cap.wq_ats_support)
                wq->wqcfg->wq_ats_disable = wq->ats_dis;

        /* bytes 12-15 */
        wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
        wq->wqcfg->max_batch_shift = ilog2(wq->max_batch_size);

        dev_dbg(dev, "WQ %d CFGs\n", wq->id);
        for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
                wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
                iowrite32(wq->wqcfg->bits[i], idxd->reg_base + wq_offset);
                dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
                        wq->id, i, wq_offset,
                        ioread32(idxd->reg_base + wq_offset));
        }

        return 0;
}
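
/*
 * The transfer and batch limits are stored as log2 shifts. For example, a
 * max_xfer_bytes of 2 MB (0x200000) is programmed as max_xfer_shift == 21,
 * and a max_batch_size of 1024 as max_batch_shift == 10.
 */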

static int idxd_wqs_config_write(struct idxd_device *idxd)
{
        int i, rc;

        for (i = 0; i < idxd->max_wqs; i++) {
                struct idxd_wq *wq = idxd->wqs[i];

                rc = idxd_wq_config_write(wq);
                if (rc < 0)
                        return rc;
        }

        return 0;
}

static void idxd_group_flags_setup(struct idxd_device *idxd)
{
        int i;

        /* TC-A 0 and TC-B 1 should be defaults */
        for (i = 0; i < idxd->max_groups; i++) {
                struct idxd_group *group = idxd->groups[i];

                if (group->tc_a == -1)
                        group->tc_a = group->grpcfg.flags.tc_a = 0;
                else
                        group->grpcfg.flags.tc_a = group->tc_a;
                if (group->tc_b == -1)
                        group->tc_b = group->grpcfg.flags.tc_b = 1;
                else
                        group->grpcfg.flags.tc_b = group->tc_b;
                group->grpcfg.flags.use_rdbuf_limit = group->use_rdbuf_limit;
                group->grpcfg.flags.rdbufs_reserved = group->rdbufs_reserved;
                if (group->rdbufs_allowed)
                        group->grpcfg.flags.rdbufs_allowed = group->rdbufs_allowed;
                else
                        group->grpcfg.flags.rdbufs_allowed = idxd->max_rdbufs;
        }
}

static int idxd_engines_setup(struct idxd_device *idxd)
{
        int i, engines = 0;
        struct idxd_engine *eng;
        struct idxd_group *group;

        for (i = 0; i < idxd->max_groups; i++) {
                group = idxd->groups[i];
                group->grpcfg.engines = 0;
        }

        for (i = 0; i < idxd->max_engines; i++) {
                eng = idxd->engines[i];
                group = eng->group;

                if (!group)
                        continue;

                group->grpcfg.engines |= BIT(eng->id);
                engines++;
        }

        if (!engines)
                return -EINVAL;

        return 0;
}

static int idxd_wqs_setup(struct idxd_device *idxd)
{
        struct idxd_wq *wq;
        struct idxd_group *group;
        int i, j, configured = 0;
        struct device *dev = &idxd->pdev->dev;

        for (i = 0; i < idxd->max_groups; i++) {
                group = idxd->groups[i];
                for (j = 0; j < 4; j++)
                        group->grpcfg.wqs[j] = 0;
        }

        for (i = 0; i < idxd->max_wqs; i++) {
                wq = idxd->wqs[i];
                group = wq->group;

                if (!wq->group)
                        continue;

                if (wq_shared(wq) && !wq_shared_supported(wq)) {
                        idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT;
                        dev_warn(dev, "No shared wq support but configured.\n");
                        return -EINVAL;
                }

                group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64);
                configured++;
        }

        if (configured == 0) {
                idxd->cmd_status = IDXD_SCMD_WQ_NONE_CONFIGURED;
                return -EINVAL;
        }

        return 0;
}
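
/*
 * A group's WQ membership is a 256-bit bitmap stored as four u64 words:
 * WQ n sets bit (n % 64) of word (n / 64). For example, wq->id == 66 sets
 * bit 2 of grpcfg.wqs[1]. idxd_group_load_config() below reverses the
 * mapping with id = i * 64 + j.
 */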

int idxd_device_config(struct idxd_device *idxd)
{
        int rc;

        lockdep_assert_held(&idxd->dev_lock);
        rc = idxd_wqs_setup(idxd);
        if (rc < 0)
                return rc;

        rc = idxd_engines_setup(idxd);
        if (rc < 0)
                return rc;

        idxd_group_flags_setup(idxd);

        rc = idxd_wqs_config_write(idxd);
        if (rc < 0)
                return rc;

        rc = idxd_groups_config_write(idxd);
        if (rc < 0)
                return rc;

        return 0;
}
*wq
)
1017 struct idxd_device
*idxd
= wq
->idxd
;
1018 struct device
*dev
= &idxd
->pdev
->dev
;
1022 wqcfg_offset
= WQCFG_OFFSET(idxd
, wq
->id
, 0);
1023 memcpy_fromio(wq
->wqcfg
, idxd
->reg_base
+ wqcfg_offset
, idxd
->wqcfg_size
);
1025 wq
->size
= wq
->wqcfg
->wq_size
;
1026 wq
->threshold
= wq
->wqcfg
->wq_thresh
;
1028 /* The driver does not support shared WQ mode in read-only config yet */
1029 if (wq
->wqcfg
->mode
== 0 || wq
->wqcfg
->pasid_en
)
1032 set_bit(WQ_FLAG_DEDICATED
, &wq
->flags
);
1034 wq
->priority
= wq
->wqcfg
->priority
;
1036 wq
->max_xfer_bytes
= 1ULL << wq
->wqcfg
->max_xfer_shift
;
1037 wq
->max_batch_size
= 1ULL << wq
->wqcfg
->max_batch_shift
;
1039 for (i
= 0; i
< WQCFG_STRIDES(idxd
); i
++) {
1040 wqcfg_offset
= WQCFG_OFFSET(idxd
, wq
->id
, i
);
1041 dev_dbg(dev
, "WQ[%d][%d][%#x]: %#x\n", wq
->id
, i
, wqcfg_offset
, wq
->wqcfg
->bits
[i
]);

static void idxd_group_load_config(struct idxd_group *group)
{
        struct idxd_device *idxd = group->idxd;
        struct device *dev = &idxd->pdev->dev;
        int i, j, grpcfg_offset;

        /*
         * Load WQS bit fields
         * Iterate through all 256 bits, 64 bits at a time
         */
        for (i = 0; i < GRPWQCFG_STRIDES; i++) {
                struct idxd_wq *wq;

                grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
                group->grpcfg.wqs[i] = ioread64(idxd->reg_base + grpcfg_offset);
                dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
                        group->id, i, grpcfg_offset, group->grpcfg.wqs[i]);

                if (i * 64 >= idxd->max_wqs)
                        break;

                /* Iterate through all 64 bits and check for wq set */
                for (j = 0; j < 64; j++) {
                        int id = i * 64 + j;

                        /* No need to check beyond max wqs */
                        if (id >= idxd->max_wqs)
                                break;

                        /* Set group assignment for wq if wq bit is set */
                        if (group->grpcfg.wqs[i] & BIT(j)) {
                                wq = idxd->wqs[id];
                                wq->group = group;
                        }
                }
        }

        grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
        group->grpcfg.engines = ioread64(idxd->reg_base + grpcfg_offset);
        dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
                grpcfg_offset, group->grpcfg.engines);

        /* Iterate through all 64 bits to check engines set */
        for (i = 0; i < 64; i++) {
                if (i >= idxd->max_engines)
                        break;

                if (group->grpcfg.engines & BIT(i)) {
                        struct idxd_engine *engine = idxd->engines[i];

                        engine->group = group;
                }
        }

        grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
        group->grpcfg.flags.bits = ioread32(idxd->reg_base + grpcfg_offset);
        dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
                group->id, grpcfg_offset, group->grpcfg.flags.bits);
}

int idxd_device_load_config(struct idxd_device *idxd)
{
        union gencfg_reg reg;
        int i, rc;

        reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
        idxd->rdbuf_limit = reg.rdbuf_limit;

        for (i = 0; i < idxd->max_groups; i++) {
                struct idxd_group *group = idxd->groups[i];

                idxd_group_load_config(group);
        }

        for (i = 0; i < idxd->max_wqs; i++) {
                struct idxd_wq *wq = idxd->wqs[i];

                rc = idxd_wq_load_config(wq);
                if (rc < 0)
                        return rc;
        }

        return 0;
}
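
/*
 * The load_config routines above are the inverse of the config_write path:
 * they populate the driver's shadow state from registers that firmware or
 * a prior agent already programmed, for devices whose configuration is
 * read-only.
 */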

static void idxd_flush_pending_descs(struct idxd_irq_entry *ie)
{
        struct idxd_desc *desc, *itr;
        struct llist_node *head;
        LIST_HEAD(flist);
        enum idxd_complete_type ctype;

        spin_lock(&ie->list_lock);
        head = llist_del_all(&ie->pending_llist);
        if (head) {
                llist_for_each_entry_safe(desc, itr, head, llnode)
                        list_add_tail(&desc->list, &ie->work_list);
        }

        list_for_each_entry_safe(desc, itr, &ie->work_list, list)
                list_move_tail(&desc->list, &flist);
        spin_unlock(&ie->list_lock);

        list_for_each_entry_safe(desc, itr, &flist, list) {
                list_del(&desc->list);
                ctype = desc->completion->status ? IDXD_COMPLETE_NORMAL : IDXD_COMPLETE_ABORT;
                idxd_dma_complete_txd(desc, ctype, true);
        }
}

static void idxd_device_set_perm_entry(struct idxd_device *idxd,
                                       struct idxd_irq_entry *ie)
{
        union msix_perm mperm;

        if (ie->pasid == INVALID_IOASID)
                return;

        mperm.bits = 0;
        mperm.pasid = ie->pasid;
        mperm.pasid_en = 1;
        iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
}

static void idxd_device_clear_perm_entry(struct idxd_device *idxd,
                                         struct idxd_irq_entry *ie)
{
        iowrite32(0, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
}

void idxd_wq_free_irq(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct idxd_irq_entry *ie = &wq->ie;

        if (wq->type != IDXD_WQT_KERNEL)
                return;

        free_irq(ie->vector, ie);
        idxd_flush_pending_descs(ie);
        if (idxd->request_int_handles)
                idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
        idxd_device_clear_perm_entry(idxd, ie);
        ie->vector = -1;
        ie->int_handle = INVALID_INT_HANDLE;
        ie->pasid = INVALID_IOASID;
}

int idxd_wq_request_irq(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct pci_dev *pdev = idxd->pdev;
        struct device *dev = &pdev->dev;
        struct idxd_irq_entry *ie;
        int rc;

        if (wq->type != IDXD_WQT_KERNEL)
                return 0;

        ie = &wq->ie;
        ie->vector = pci_irq_vector(pdev, ie->id);
        ie->pasid = device_pasid_enabled(idxd) ? idxd->pasid : INVALID_IOASID;
        idxd_device_set_perm_entry(idxd, ie);

        rc = request_threaded_irq(ie->vector, NULL, idxd_wq_thread, 0, "idxd-portal", ie);
        if (rc < 0) {
                dev_err(dev, "Failed to request irq %d.\n", ie->vector);
                goto err_irq;
        }

        if (idxd->request_int_handles) {
                rc = idxd_device_request_int_handle(idxd, ie->id, &ie->int_handle,
                                                    IDXD_IRQ_MSIX);
                if (rc < 0)
                        goto err_int_handle;
        } else {
                ie->int_handle = ie->id;
        }

        return 0;

err_int_handle:
        ie->int_handle = INVALID_INT_HANDLE;
        free_irq(ie->vector, ie);
err_irq:
        idxd_device_clear_perm_entry(idxd, ie);
        ie->pasid = INVALID_IOASID;
        return rc;
}

int drv_enable_wq(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        int rc = -ENXIO;

        lockdep_assert_held(&wq->wq_lock);

        if (idxd->state != IDXD_DEV_ENABLED) {
                idxd->cmd_status = IDXD_SCMD_DEV_NOT_ENABLED;
                goto err;
        }

        if (wq->state != IDXD_WQ_DISABLED) {
                dev_dbg(dev, "wq %d already enabled.\n", wq->id);
                idxd->cmd_status = IDXD_SCMD_WQ_ENABLED;
                rc = -EBUSY;
                goto err;
        }

        if (!wq->group) {
                dev_dbg(dev, "wq %d not attached to group.\n", wq->id);
                idxd->cmd_status = IDXD_SCMD_WQ_NO_GRP;
                goto err;
        }

        if (strlen(wq->name) == 0) {
                idxd->cmd_status = IDXD_SCMD_WQ_NO_NAME;
                dev_dbg(dev, "wq %d name not set.\n", wq->id);
                goto err;
        }

        /* Shared WQ checks */
        if (wq_shared(wq)) {
                if (!wq_shared_supported(wq)) {
                        idxd->cmd_status = IDXD_SCMD_WQ_NO_SVM;
                        dev_dbg(dev, "PASID not enabled and shared wq.\n");
                        goto err;
                }
                /*
                 * Shared wq with the threshold set to 0 means the user
                 * did not set the threshold or transitioned from a
                 * dedicated wq but did not set threshold. A value
                 * of 0 would effectively disable the shared wq. The
                 * driver does not allow a value of 0 to be set for
                 * threshold via sysfs.
                 */
                if (wq->threshold == 0) {
                        idxd->cmd_status = IDXD_SCMD_WQ_NO_THRESH;
                        dev_dbg(dev, "Shared wq and threshold 0.\n");
                        goto err;
                }
        }

        /*
         * In the event that the WQ is configurable for pasid and priv bits.
         * For kernel wq, the driver should setup the pasid, pasid_en, and priv bit.
         * However, for non-kernel wq, the driver should only set the pasid_en bit for
         * shared wq. A dedicated wq that is not 'kernel' type will configure pasid and
         * pasid_en later on so there is no need to setup.
         */
        if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
                int priv = 0;

                if (wq_pasid_enabled(wq)) {
                        if (is_idxd_wq_kernel(wq) || wq_shared(wq)) {
                                u32 pasid = wq_dedicated(wq) ? idxd->pasid : 0;

                                __idxd_wq_set_pasid_locked(wq, pasid);
                        }
                }

                if (is_idxd_wq_kernel(wq))
                        priv = 1;
                __idxd_wq_set_priv_locked(wq, priv);
        }

        rc = 0;
        spin_lock(&idxd->dev_lock);
        if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
                rc = idxd_device_config(idxd);
        spin_unlock(&idxd->dev_lock);
        if (rc < 0) {
                dev_dbg(dev, "Writing wq %d config failed: %d\n", wq->id, rc);
                goto err;
        }

        rc = idxd_wq_enable(wq);
        if (rc < 0) {
                dev_dbg(dev, "wq %d enabling failed: %d\n", wq->id, rc);
                goto err;
        }

        rc = idxd_wq_map_portal(wq);
        if (rc < 0) {
                idxd->cmd_status = IDXD_SCMD_WQ_PORTAL_ERR;
                dev_dbg(dev, "wq %d portal mapping failed: %d\n", wq->id, rc);
                goto err_map_portal;
        }

        wq->client_count = 0;

        rc = idxd_wq_request_irq(wq);
        if (rc < 0) {
                idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR;
                dev_dbg(dev, "WQ %d irq setup failed: %d\n", wq->id, rc);
                goto err_irq;
        }

        rc = idxd_wq_alloc_resources(wq);
        if (rc < 0) {
                idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR;
                dev_dbg(dev, "WQ resource alloc failed\n");
                goto err_res_alloc;
        }

        rc = idxd_wq_init_percpu_ref(wq);
        if (rc < 0) {
                idxd->cmd_status = IDXD_SCMD_PERCPU_ERR;
                dev_dbg(dev, "percpu_ref setup failed\n");
                goto err_ref;
        }

        return 0;

err_ref:
        idxd_wq_free_resources(wq);
err_res_alloc:
        idxd_wq_free_irq(wq);
err_irq:
        idxd_wq_unmap_portal(wq);
err_map_portal:
        rc = idxd_wq_disable(wq, false);
        if (rc < 0)
                dev_dbg(dev, "wq %s disable failed\n", dev_name(wq_confdev(wq)));
err:
        return rc;
}
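
/*
 * The error labels above unwind in the reverse order of setup: free the
 * descriptor resources, then the IRQ, then the portal mapping, and finally
 * disable the WQ again.
 */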

void drv_disable_wq(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;

        lockdep_assert_held(&wq->wq_lock);

        if (idxd_wq_refcount(wq))
                dev_warn(dev, "Clients have a claim on wq %d: %d\n",
                         wq->id, idxd_wq_refcount(wq));

        idxd_wq_free_resources(wq);
        idxd_wq_unmap_portal(wq);
        idxd_wq_drain(wq);
        idxd_wq_free_irq(wq);
        idxd_wq_reset(wq);
        percpu_ref_exit(&wq->wq_active);
        wq->type = IDXD_WQT_NONE;
        wq->client_count = 0;
}

int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
{
        struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
        int rc = 0;

        /*
         * Device should be in disabled state for the idxd_drv to load. If it's in
         * enabled state, then the device was altered outside of driver's control.
         * If the state is in halted state, then we don't want to proceed.
         */
        if (idxd->state != IDXD_DEV_DISABLED) {
                idxd->cmd_status = IDXD_SCMD_DEV_ENABLED;
                return -ENXIO;
        }

        /* Device configuration */
        spin_lock(&idxd->dev_lock);
        if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
                rc = idxd_device_config(idxd);
        spin_unlock(&idxd->dev_lock);
        if (rc < 0)
                return -ENXIO;

        /* Start device */
        rc = idxd_device_enable(idxd);
        if (rc < 0)
                return rc;

        /* Setup DMA device without channels */
        rc = idxd_register_dma_device(idxd);
        if (rc < 0) {
                idxd_device_disable(idxd);
                idxd->cmd_status = IDXD_SCMD_DEV_DMA_ERR;
                return rc;
        }

        idxd->cmd_status = 0;
        return 0;
}

void idxd_device_drv_remove(struct idxd_dev *idxd_dev)
{
        struct device *dev = &idxd_dev->conf_dev;
        struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
        int i;

        for (i = 0; i < idxd->max_wqs; i++) {
                struct idxd_wq *wq = idxd->wqs[i];
                struct device *wq_dev = wq_confdev(wq);

                if (wq->state == IDXD_WQ_DISABLED)
                        continue;
                dev_warn(dev, "Active wq %d on disable %s.\n", i, dev_name(wq_dev));
                device_release_driver(wq_dev);
        }

        idxd_unregister_dma_device(idxd);
        idxd_device_disable(idxd);
        if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
                idxd_device_reset(idxd);
}

static enum idxd_dev_type dev_types[] = {
        IDXD_DEV_DSA,
        IDXD_DEV_IAX,
        IDXD_DEV_NONE,
};

struct idxd_device_driver idxd_drv = {
        .type = dev_types,
        .probe = idxd_device_drv_probe,
        .remove = idxd_device_drv_remove,
        .name = "idxd",
};
EXPORT_SYMBOL_GPL(idxd_drv);