/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dca.h>
#include <linux/aer.h>
#include <linux/sizes.h>
#include "registers.h"
#include "hw.h"
#include "dma.h"

#include "../dmaengine.h"
MODULE_VERSION(IOAT_DMA_VERSION);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");
static struct pci_device_id ioat_pci_tbl[] = {
	/* I/OAT v3 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) },

	/* I/OAT v3.2 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9) },

	/* I/OAT v3.3 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) },

	/* terminate list */
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
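/*
 * The table above covers the I/OAT v3 (TBG), v3.2 (JSF/SNB/IVB/HSW/BDX) and
 * v3.3 (BWD/BDX-DE) DMA engines.  MODULE_DEVICE_TABLE() exports these IDs so
 * userspace can autoload this driver when a matching PCI function appears.
 */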
static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void ioat_remove(struct pci_dev *pdev);
static void
ioat_init_channel(struct ioatdma_device *ioat_dma,
		  struct ioatdma_chan *ioat_chan, int idx);
static void ioat_intr_quirk(struct ioatdma_device *ioat_dma);
static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma);
static int ioat_dca_enabled = 1;
module_param(ioat_dca_enabled, int, 0644);
MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");
static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), msi, intx");
struct kmem_cache *ioat_cache;
struct kmem_cache *ioat_sed_cache;
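/*
 * ioat_cache backs the software descriptor ring entries (struct
 * ioat_ring_ent); ioat_sed_cache backs the super extended descriptor
 * entries used by CB3.3 RAID operations.  Both caches are created in
 * ioat_init_module() below.
 */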
static bool is_jf_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
		return true;
	default:
		return false;
	}
}
static bool is_snb_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
		return true;
	default:
		return false;
	}
}
static bool is_ivb_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
		return true;
	default:
		return false;
	}
}
static bool is_hsw_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
		return true;
	default:
		return false;
	}
}
static bool is_bdx_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BDX0:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX1:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX2:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX3:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX4:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX5:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX6:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX7:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX8:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX9:
		return true;
	default:
		return false;
	}
}
static bool is_xeon_cb32(struct pci_dev *pdev)
{
	return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
		is_hsw_ioat(pdev) || is_bdx_ioat(pdev);
}
bool is_bwd_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
	/* even though not Atom, BDX-DE has same DMA silicon */
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
		return true;
	default:
		return false;
	}
}
static bool is_bwd_noraid(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
		return true;
	default:
		return false;
	}
}
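/*
 * The is_*_ioat() helpers above classify the device by platform family so
 * the probe path can apply the right quirks: is_xeon_cb32() and
 * is_bwd_noraid() gate the masking of XOR/PQ/RAID capabilities in
 * ioat3_dma_probe(), while is_bwd_ioat() selects the interrupt quirk in
 * ioat_dma_setup_interrupts().
 */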
/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}
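/*
 * ioat_dma_test_callback() runs in the DMA engine's completion context
 * (typically the channel's cleanup tasklet) and simply wakes the thread
 * blocked in wait_for_completion_timeout() during the self-tests below.
 */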
/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @ioat_dma: dma device to be tested
 */
static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct device *dev = &ioat_dma->pdev->dev;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	unsigned long flags;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_src)) {
		dev_err(dev, "mapping src buffer failed\n");
		err = -ENOMEM;
		goto free_resources;
	}
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_dest)) {
		dev_err(dev, "mapping dest buffer failed\n");
		err = -ENOMEM;
		goto unmap_src;
	}
	flags = DMA_PREP_INTERRUPT;
	tx = ioat_dma->dma_dev.device_prep_dma_memcpy(dma_chan, dma_dest,
						      dma_src, IOAT_TEST_SIZE,
						      flags);
	if (!tx) {
		dev_err(dev, "Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(dev, "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}

unmap_dma:
	dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
unmap_src:
	dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}
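/*
 * Interrupt setup honors the ioat_interrupt_style module parameter and
 * falls back in the order msix -> msi -> intx; if none of them can be set
 * up, interrupt generation is disabled and an error is returned.
 */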
/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @ioat_dma: ioat dma device
 */
int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma)
{
	struct ioatdma_chan *ioat_chan;
	struct pci_dev *pdev = ioat_dma->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	int i, j, msixcnt;
	int err = -EINVAL;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = ioat_dma->dma_dev.chancnt;
	for (i = 0; i < msixcnt; i++)
		ioat_dma->msix_entries[i].entry = i;

	err = pci_enable_msix_exact(pdev, ioat_dma->msix_entries, msixcnt);
	if (err)
		goto msi;

	for (i = 0; i < msixcnt; i++) {
		msix = &ioat_dma->msix_entries[i];
		ioat_chan = ioat_chan_by_index(ioat_dma, i);
		err = devm_request_irq(dev, msix->vector,
				       ioat_dma_do_interrupt_msix, 0,
				       "ioat-msix", ioat_chan);
		if (err) {
			for (j = 0; j < i; j++) {
				msix = &ioat_dma->msix_entries[j];
				ioat_chan = ioat_chan_by_index(ioat_dma, j);
				devm_free_irq(dev, msix->vector, ioat_chan);
			}
			goto msi;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	ioat_dma->irq_mode = IOAT_MSIX;
	goto done;

msi:
	err = pci_enable_msi(pdev);
	if (err)
		goto intx;

	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
			       "ioat-msi", ioat_dma);
	if (err) {
		pci_disable_msi(pdev);
		goto intx;
	}
	ioat_dma->irq_mode = IOAT_MSI;
	goto done;

intx:
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
			       IRQF_SHARED, "ioat-intx", ioat_dma);
	if (err)
		goto err_no_irq;

	ioat_dma->irq_mode = IOAT_INTX;
done:
	if (is_bwd_ioat(pdev))
		ioat_intr_quirk(ioat_dma);
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
	ioat_dma->irq_mode = IOAT_NOIRQ;
	dev_err(dev, "no usable interrupts\n");
	return err;
}
static void ioat_disable_interrupts(struct ioatdma_device *ioat_dma)
{
	/* Disable all interrupt generation */
	writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
}
static int ioat_probe(struct ioatdma_device *ioat_dma)
{
	int err = -ENODEV;
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct pci_dev *pdev = ioat_dma->pdev;
	struct device *dev = &pdev->dev;

	ioat_dma->completion_pool = dma_pool_create("completion_pool", dev,
						    sizeof(u64),
						    SMP_CACHE_BYTES,
						    SMP_CACHE_BYTES);

	if (!ioat_dma->completion_pool) {
		err = -ENOMEM;
		goto err_out;
	}

	ioat_enumerate_channels(ioat_dma);

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->dev = &pdev->dev;

	if (!dma->chancnt) {
		dev_err(dev, "channel enumeration error\n");
		goto err_setup_interrupts;
	}

	err = ioat_dma_setup_interrupts(ioat_dma);
	if (err)
		goto err_setup_interrupts;

	err = ioat3_dma_self_test(ioat_dma);
	if (err)
		goto err_self_test;

	return 0;

err_self_test:
	ioat_disable_interrupts(ioat_dma);
err_setup_interrupts:
	dma_pool_destroy(ioat_dma->completion_pool);
err_out:
	return err;
}
static int ioat_register(struct ioatdma_device *ioat_dma)
{
	int err = dma_async_device_register(&ioat_dma->dma_dev);

	if (err) {
		ioat_disable_interrupts(ioat_dma);
		dma_pool_destroy(ioat_dma->completion_pool);
	}

	return err;
}
static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
{
	struct dma_device *dma = &ioat_dma->dma_dev;

	ioat_disable_interrupts(ioat_dma);

	ioat_kobject_del(ioat_dma);

	dma_async_device_unregister(dma);

	dma_pool_destroy(ioat_dma->completion_pool);

	INIT_LIST_HEAD(&dma->channels);
}
/**
 * ioat_enumerate_channels - find and initialize the device's channels
 * @ioat_dma: the ioat dma device to be enumerated
 */
static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
{
	struct ioatdma_chan *ioat_chan;
	struct device *dev = &ioat_dma->pdev->dev;
	struct dma_device *dma = &ioat_dma->dma_dev;
	u8 xfercap_log;
	int i;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(ioat_dma->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(ioat_dma->idx));
		dma->chancnt = ARRAY_SIZE(ioat_dma->idx);
	}
	xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_log &= 0x1f; /* bits [4:0] valid */
	if (xfercap_log == 0)
		return 0;
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);

	for (i = 0; i < dma->chancnt; i++) {
		ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
		if (!ioat_chan)
			break;

		ioat_init_channel(ioat_dma, ioat_chan, i);
		ioat_chan->xfercap_log = xfercap_log;
		spin_lock_init(&ioat_chan->prep_lock);
		if (ioat_reset_hw(ioat_chan)) {
			i = 0;
			break;
		}
	}
	dma->chancnt = i;
	return i;
}
/**
 * ioat_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat_free_chan_resources(struct dma_chan *c)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	const int total_descs = 1 << ioat_chan->alloc_order;
	int descs;
	int i;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (!ioat_chan->ring)
		return;

	ioat_stop(ioat_chan);
	ioat_reset_hw(ioat_chan);

	spin_lock_bh(&ioat_chan->cleanup_lock);
	spin_lock_bh(&ioat_chan->prep_lock);
	descs = ioat_ring_space(ioat_chan);
	dev_dbg(to_dev(ioat_chan), "freeing %d idle descriptors\n", descs);
	for (i = 0; i < descs; i++) {
		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head + i);
		ioat_free_ring_ent(desc, c);
	}

	if (descs < total_descs)
		dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n",
			total_descs - descs);

	for (i = 0; i < total_descs - descs; i++) {
		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail + i);
		dump_desc_dbg(ioat_chan, desc);
		ioat_free_ring_ent(desc, c);
	}

	for (i = 0; i < ioat_chan->desc_chunks; i++) {
		dma_free_coherent(to_dev(ioat_chan), SZ_2M,
				  ioat_chan->descs[i].virt,
				  ioat_chan->descs[i].hw);
		ioat_chan->descs[i].virt = NULL;
		ioat_chan->descs[i].hw = 0;
	}
	ioat_chan->desc_chunks = 0;

	kfree(ioat_chan->ring);
	ioat_chan->ring = NULL;
	ioat_chan->alloc_order = 0;
	dma_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
		      ioat_chan->completion_dma);
	spin_unlock_bh(&ioat_chan->prep_lock);
	spin_unlock_bh(&ioat_chan->cleanup_lock);

	ioat_chan->last_completion = 0;
	ioat_chan->completion_dma = 0;
	ioat_chan->dmacount = 0;
}
/* ioat_alloc_chan_resources - allocate/initialize ioat descriptor ring
 * @chan: channel to be initialized
 */
static int ioat_alloc_chan_resources(struct dma_chan *c)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioat_ring_ent **ring;
	u64 status;
	int order;
	int i = 0;
	u32 chanerr;

	/* have we already been set up? */
	if (ioat_chan->ring)
		return 1 << ioat_chan->alloc_order;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	ioat_chan->completion =
		dma_pool_zalloc(ioat_chan->ioat_dma->completion_pool,
				GFP_KERNEL, &ioat_chan->completion_dma);
	if (!ioat_chan->completion)
		return -ENOMEM;

	writel(((u64)ioat_chan->completion_dma) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64)ioat_chan->completion_dma) >> 32,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	order = IOAT_MAX_ORDER;
	ring = ioat_alloc_ring(c, order, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	spin_lock_bh(&ioat_chan->cleanup_lock);
	spin_lock_bh(&ioat_chan->prep_lock);
	ioat_chan->ring = ring;
	ioat_chan->head = 0;
	ioat_chan->issued = 0;
	ioat_chan->tail = 0;
	ioat_chan->alloc_order = order;
	set_bit(IOAT_RUN, &ioat_chan->state);
	spin_unlock_bh(&ioat_chan->prep_lock);
	spin_unlock_bh(&ioat_chan->cleanup_lock);

	ioat_start_null_desc(ioat_chan);

	/* check that we got off the ground */
	do {
		udelay(1);
		status = ioat_chansts(ioat_chan);
	} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));

	if (is_ioat_active(status) || is_ioat_idle(status))
		return 1 << ioat_chan->alloc_order;

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

	dev_WARN(to_dev(ioat_chan),
		 "failed to start channel chanerr: %#x\n", chanerr);
	ioat_free_chan_resources(c);
	return -EFAULT;
}
/* common channel initialization */
static void
ioat_init_channel(struct ioatdma_device *ioat_dma,
		  struct ioatdma_chan *ioat_chan, int idx)
{
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct dma_chan *c = &ioat_chan->dma_chan;
	unsigned long data = (unsigned long) c;

	ioat_chan->ioat_dma = ioat_dma;
	ioat_chan->reg_base = ioat_dma->reg_base + (0x80 * (idx + 1));
	spin_lock_init(&ioat_chan->cleanup_lock);
	ioat_chan->dma_chan.device = dma;
	dma_cookie_init(&ioat_chan->dma_chan);
	list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels);
	ioat_dma->idx[idx] = ioat_chan;
	init_timer(&ioat_chan->timer);
	ioat_chan->timer.function = ioat_timer_event;
	ioat_chan->timer.data = data;
	tasklet_init(&ioat_chan->cleanup_task, ioat_cleanup_event, data);
}
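/*
 * Each channel's register block lives at a 0x80-byte stride past the device's
 * common registers, hence the reg_base offset of 0x80 * (idx + 1) above.  The
 * per-channel timer (ioat_timer_event) and cleanup tasklet (ioat_cleanup_event)
 * initialized here drive completion processing for that channel.
 */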
#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[IOAT_NUM_SRC_TEST];
	struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	u32 xor_val_result;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	struct device *dev = &ioat_dma->pdev->dev;
	struct dma_device *dma = &ioat_dma->dma_dev;
	u8 op = 0;

	dev_dbg(dev, "%s\n", __func__);

	if (!dma_has_cap(DMA_XOR, dma->cap_mask))
		return 0;

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);

		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
			(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	op = IOAT_OP_XOR;

	dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dest_dma)) {
		err = -ENOMEM;
		goto free_resources;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		dma_srcs[i] = DMA_ERROR_CODE;
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
		dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_srcs[i])) {
			err = -ENOMEM;
			goto dma_unmap;
		}
	}
	tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				      IOAT_NUM_SRC_TEST, PAGE_SIZE,
				      DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(dev, "Self-test xor prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test xor setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test xor timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);

		if (ptr[i] != cmp_word) {
			dev_err(dev, "Self-test xor failed compare\n");
			err = -ENODEV;
			goto free_resources;
		}
	}
	dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);

	dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);

	/* skip validate if the capability is not present */
	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
		goto free_resources;

	op = IOAT_OP_XOR_VAL;

	/* validate the sources with the destination page */
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		xor_val_srcs[i] = xor_srcs[i];
	xor_val_srcs[i] = dest;

	xor_val_result = 1;

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = DMA_ERROR_CODE;
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_srcs[i])) {
			err = -ENOMEM;
			goto dma_unmap;
		}
	}
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(dev, "Self-test zero prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test zero setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test validate timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	if (xor_val_result != 0) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto free_resources;
	}

	memset(page_address(dest), 0, PAGE_SIZE);

	/* test for non-zero parity sum */
	op = IOAT_OP_XOR_VAL;

	xor_val_result = 0;
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = DMA_ERROR_CODE;
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_srcs[i])) {
			err = -ENOMEM;
			goto dma_unmap;
		}
	}
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(dev, "Self-test 2nd zero prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test 2nd zero setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test 2nd validate timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	if (xor_val_result != SUM_CHECK_P_RESULT) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	goto free_resources;
dma_unmap:
	if (op == IOAT_OP_XOR) {
		if (dest_dma != DMA_ERROR_CODE)
			dma_unmap_page(dev, dest_dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
		for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
			if (dma_srcs[i] != DMA_ERROR_CODE)
				dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
					       DMA_TO_DEVICE);
	} else if (op == IOAT_OP_XOR_VAL) {
		for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
			if (dma_srcs[i] != DMA_ERROR_CODE)
				dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
					       DMA_TO_DEVICE);
	}
free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	src_idx = IOAT_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}
static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma)
{
	int rc;

	rc = ioat_dma_self_test(ioat_dma);
	if (rc)
		return rc;

	rc = ioat_xor_val_self_test(ioat_dma);

	return rc;
}
static void ioat_intr_quirk(struct ioatdma_device *ioat_dma)
{
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioatdma_chan *ioat_chan;
	u32 errmask;

	dma = &ioat_dma->dma_dev;

	/*
	 * if we have descriptor write back error status, we mask the
	 * error interrupts
	 */
	if (ioat_dma->cap & IOAT_CAP_DWBES) {
		list_for_each_entry(c, &dma->channels, device_node) {
			ioat_chan = to_ioat_chan(c);
			errmask = readl(ioat_chan->reg_base +
					IOAT_CHANERR_MASK_OFFSET);
			errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
				   IOAT_CHANERR_XOR_Q_ERR;
			writel(errmask, ioat_chan->reg_base +
					IOAT_CHANERR_MASK_OFFSET);
		}
	}
}
static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
{
	struct pci_dev *pdev = ioat_dma->pdev;
	int dca_en = system_has_dca_enabled(pdev);
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioatdma_chan *ioat_chan;
	int err;
	u16 val16;

	dma = &ioat_dma->dma_dev;
	dma->device_prep_dma_memcpy = ioat_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat_issue_pending;
	dma->device_alloc_chan_resources = ioat_alloc_chan_resources;
	dma->device_free_chan_resources = ioat_free_chan_resources;

	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
	dma->device_prep_dma_interrupt = ioat_prep_interrupt_lock;

	ioat_dma->cap = readl(ioat_dma->reg_base + IOAT_DMA_CAP_OFFSET);

	if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
		ioat_dma->cap &=
			~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);

	/* dca is incompatible with raid operations */
	if (dca_en && (ioat_dma->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
		ioat_dma->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);

	if (ioat_dma->cap & IOAT_CAP_XOR) {
		dma->max_xor = 8;

		dma_cap_set(DMA_XOR, dma->cap_mask);
		dma->device_prep_dma_xor = ioat_prep_xor;

		dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
		dma->device_prep_dma_xor_val = ioat_prep_xor_val;
	}

	if (ioat_dma->cap & IOAT_CAP_PQ) {

		dma->device_prep_dma_pq = ioat_prep_pq;
		dma->device_prep_dma_pq_val = ioat_prep_pq_val;
		dma_cap_set(DMA_PQ, dma->cap_mask);
		dma_cap_set(DMA_PQ_VAL, dma->cap_mask);

		if (ioat_dma->cap & IOAT_CAP_RAID16SS)
			dma_set_maxpq(dma, 16, 0);
		else
			dma_set_maxpq(dma, 8, 0);

		if (!(ioat_dma->cap & IOAT_CAP_XOR)) {
			dma->device_prep_dma_xor = ioat_prep_pqxor;
			dma->device_prep_dma_xor_val = ioat_prep_pqxor_val;
			dma_cap_set(DMA_XOR, dma->cap_mask);
			dma_cap_set(DMA_XOR_VAL, dma->cap_mask);

			if (ioat_dma->cap & IOAT_CAP_RAID16SS)
				dma->max_xor = 16;
			else
				dma->max_xor = 8;
		}
	}

	dma->device_tx_status = ioat_tx_status;

	/* starting with CB3.3 super extended descriptors are supported */
	if (ioat_dma->cap & IOAT_CAP_RAID16SS) {
		char pool_name[14];
		int i;

		for (i = 0; i < MAX_SED_POOLS; i++) {
			snprintf(pool_name, 14, "ioat_hw%d_sed", i);

			/* allocate SED DMA pool */
			ioat_dma->sed_hw_pool[i] = dmam_pool_create(pool_name,
					&pdev->dev,
					SED_SIZE * (i + 1), 64, 0);
			if (!ioat_dma->sed_hw_pool[i])
				return -ENOMEM;

		}
	}

	if (!(ioat_dma->cap & (IOAT_CAP_XOR | IOAT_CAP_PQ)))
		dma_cap_set(DMA_PRIVATE, dma->cap_mask);

	err = ioat_probe(ioat_dma);
	if (err)
		return err;

	list_for_each_entry(c, &dma->channels, device_node) {
		ioat_chan = to_ioat_chan(c);
		writel(IOAT_DMA_DCA_ANY_CPU,
		       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(ioat_dma);
	if (err)
		return err;

	ioat_kobject_add(ioat_dma, &ioat_ktype);

	if (dca)
		ioat_dma->dca = ioat_dca_init(pdev, ioat_dma->reg_base);

	/* disable relaxed ordering */
	err = pcie_capability_read_word(pdev, IOAT_DEVCTRL_OFFSET, &val16);
	if (err)
		return err;

	/* clear relaxed ordering enable */
	val16 &= ~IOAT_DEVCTRL_ROE;
	err = pcie_capability_write_word(pdev, IOAT_DEVCTRL_OFFSET, val16);
	if (err)
		return err;

	return 0;
}
static void ioat_shutdown(struct pci_dev *pdev)
{
	struct ioatdma_device *ioat_dma = pci_get_drvdata(pdev);
	struct ioatdma_chan *ioat_chan;
	int i;

	if (!ioat_dma)
		return;

	for (i = 0; i < IOAT_MAX_CHANS; i++) {
		ioat_chan = ioat_dma->idx[i];
		if (!ioat_chan)
			continue;

		spin_lock_bh(&ioat_chan->prep_lock);
		set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
		del_timer_sync(&ioat_chan->timer);
		spin_unlock_bh(&ioat_chan->prep_lock);
		/* this should quiesce then reset */
		ioat_reset_hw(ioat_chan);
	}

	ioat_disable_interrupts(ioat_dma);
}
static void ioat_resume(struct ioatdma_device *ioat_dma)
{
	struct ioatdma_chan *ioat_chan;
	u32 chanerr;
	int i;

	for (i = 0; i < IOAT_MAX_CHANS; i++) {
		ioat_chan = ioat_dma->idx[i];
		if (!ioat_chan)
			continue;

		spin_lock_bh(&ioat_chan->prep_lock);
		clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
		spin_unlock_bh(&ioat_chan->prep_lock);

		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

		/* no need to reset as shutdown already did that */
	}
}
#define DRV_NAME "ioatdma"

static pci_ers_result_t ioat_pcie_error_detected(struct pci_dev *pdev,
						 enum pci_channel_state error)
{
	dev_dbg(&pdev->dev, "%s: PCIe AER error %d\n", DRV_NAME, error);

	/* quiesce and block I/O */
	ioat_shutdown(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t ioat_pcie_error_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;
	int err;

	dev_dbg(&pdev->dev, "%s post reset handling\n", DRV_NAME);

	if (pci_enable_device_mem(pdev) < 0) {
		dev_err(&pdev->dev,
			"Failed to enable PCIe device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"AER uncorrect error status clear failed: %#x\n", err);
	}

	return result;
}
static void ioat_pcie_error_resume(struct pci_dev *pdev)
{
	struct ioatdma_device *ioat_dma = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s: AER handling resuming\n", DRV_NAME);

	/* initialize and bring everything back */
	ioat_resume(ioat_dma);
}
static const struct pci_error_handlers ioat_err_handler = {
	.error_detected = ioat_pcie_error_detected,
	.slot_reset = ioat_pcie_error_slot_reset,
	.resume = ioat_pcie_error_resume,
};
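/*
 * AER recovery flow: error_detected() quiesces the channels via
 * ioat_shutdown(), slot_reset() re-enables and restores the PCI device after
 * the link reset, and resume() brings the channels back up through
 * ioat_resume().
 */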
static struct pci_driver ioat_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= ioat_pci_tbl,
	.probe		= ioat_pci_probe,
	.remove		= ioat_remove,
	.shutdown	= ioat_shutdown,
	.err_handler	= &ioat_err_handler,
};
static struct ioatdma_device *
alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
{
	struct device *dev = &pdev->dev;
	struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);

	if (!d)
		return NULL;
	d->pdev = pdev;
	d->reg_base = iobase;
	return d;
}
static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem * const *iomap;
	struct device *dev = &pdev->dev;
	struct ioatdma_device *device;
	int err;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME);
	if (err)
		return err;
	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err)
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		return err;

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		return err;

	device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
	if (!device)
		return -ENOMEM;
	pci_set_master(pdev);
	pci_set_drvdata(pdev, device);

	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
	if (device->version >= IOAT_VER_3_0) {
		err = ioat3_dma_probe(device, ioat_dca_enabled);

		if (device->version >= IOAT_VER_3_3)
			pci_enable_pcie_error_reporting(pdev);
	} else
		return -ENODEV;

	if (err) {
		dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
		pci_disable_pcie_error_reporting(pdev);
		return -ENODEV;
	}

	return 0;
}
static void ioat_remove(struct pci_dev *pdev)
{
	struct ioatdma_device *device = pci_get_drvdata(pdev);

	if (!device)
		return;

	dev_err(&pdev->dev, "Removing dma and dca services\n");
	if (device->dca) {
		unregister_dca_provider(device->dca, &pdev->dev);
		free_dca_provider(device->dca);
		device->dca = NULL;
	}

	pci_disable_pcie_error_reporting(pdev);
	ioat_dma_remove(device);
}
static int __init ioat_init_module(void)
{
	int err = -ENOMEM;

	pr_info("%s: Intel(R) QuickData Technology Driver %s\n",
		DRV_NAME, IOAT_DMA_VERSION);

	ioat_cache = kmem_cache_create("ioat", sizeof(struct ioat_ring_ent),
					0, SLAB_HWCACHE_ALIGN, NULL);
	if (!ioat_cache)
		return -ENOMEM;

	ioat_sed_cache = KMEM_CACHE(ioat_sed_ent, 0);
	if (!ioat_sed_cache)
		goto err_ioat_cache;

	err = pci_register_driver(&ioat_pci_driver);
	if (err)
		goto err_ioat3_cache;

	return 0;

err_ioat3_cache:
	kmem_cache_destroy(ioat_sed_cache);

err_ioat_cache:
	kmem_cache_destroy(ioat_cache);

	return err;
}
module_init(ioat_init_module);
static void __exit ioat_exit_module(void)
{
	pci_unregister_driver(&ioat_pci_driver);
	kmem_cache_destroy(ioat_cache);
}
module_exit(ioat_exit_module);