/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2007 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/i7300_idle.h>
#include "ioatdma.h"
#include "ioatdma_registers.h"
#include "ioatdma_hw.h"

#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)

#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)

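/*
 * ioat_pending_level can also be tuned at runtime, e.g. via
 * /sys/module/ioatdma/parameters/ioat_pending_level (assuming the
 * driver is built as the ioatdma module)
 */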
static int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");

#define RESET_DELAY  msecs_to_jiffies(100)
#define WATCHDOG_DELAY  round_jiffies(msecs_to_jiffies(2000))
static void ioat_dma_chan_reset_part2(struct work_struct *work);
static void ioat_dma_chan_watchdog(struct work_struct *work);

/*
 * workaround for IOAT ver.3.0 null descriptor issue
 * (channel returns error when size is 0)
 */
#define NULL_DESC_BUFFER_SIZE 1

/* internal functions */
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);

static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
static struct ioat_desc_sw *
ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);

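/**
 * ioat_lookup_chan_by_index - map a channel/vector index to its ioat_dma_chan
 * @device: ioat device
 * @index: channel index, as stored in device->idx at enumeration time
 */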
static inline struct ioat_dma_chan *ioat_lookup_chan_by_index(
						struct ioatdma_device *device,
						int index)
{
	return device->idx[index];
}

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioat_dma_chan *ioat_chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
		ioat_chan = ioat_lookup_chan_by_index(instance, bit);
		tasklet_schedule(&ioat_chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioat_dma_chan *ioat_chan = data;

	tasklet_schedule(&ioat_chan->cleanup_task);

	return IRQ_HANDLED;
}

static void ioat_dma_cleanup_tasklet(unsigned long data);

/**
 * ioat_dma_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat_chan;

	/*
	 * IOAT ver.3 workarounds
	 */
	if (device->version == IOAT_VER_3_0) {
		u32 chan_err_mask;
		u16 dev_id;
		u32 dmauncerrsts;

		/*
		 * Write CHANERRMSK_INT with 3E07h to mask out the errors
		 * that can cause stability issues for IOAT ver.3
		 */
		chan_err_mask = 0x3E07;
		pci_write_config_dword(device->pdev,
				       IOAT_PCI_CHANERRMASK_INT_OFFSET,
				       chan_err_mask);

		/*
		 * Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
		 * (workaround for spurious config parity error after restart)
		 */
		pci_read_config_word(device->pdev,
				     IOAT_PCI_DEVICE_ID_OFFSET,
				     &dev_id);
		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
			dmauncerrsts = 0x10;
			pci_write_config_dword(device->pdev,
					       IOAT_PCI_DMAUNCERRSTS_OFFSET,
					       dmauncerrsts);
		}
	}

	device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));

#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL) == 0) {
		device->common.chancnt--;
	}
#endif
	for (i = 0; i < device->common.chancnt; i++) {
		ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
		if (!ioat_chan) {
			device->common.chancnt = i;
			break;
		}

		ioat_chan->device = device;
		ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
		ioat_chan->xfercap = xfercap;
		ioat_chan->desccount = 0;
		INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2);
		if (ioat_chan->device->version != IOAT_VER_1_2) {
			writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE
					| IOAT_DMA_DCA_ANY_CPU,
			       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
		}
		spin_lock_init(&ioat_chan->cleanup_lock);
		spin_lock_init(&ioat_chan->desc_lock);
		INIT_LIST_HEAD(&ioat_chan->free_desc);
		INIT_LIST_HEAD(&ioat_chan->used_desc);
		/* This should be made common somewhere in dmaengine.c */
		ioat_chan->common.device = &device->common;
		list_add_tail(&ioat_chan->common.device_node,
			      &device->common.channels);
		device->idx[i] = ioat_chan;
		tasklet_init(&ioat_chan->cleanup_task,
			     ioat_dma_cleanup_tasklet,
			     (unsigned long) ioat_chan);
		tasklet_disable(&ioat_chan->cleanup_task);
	}
	return device->common.chancnt;
}

/**
 * ioat1_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                  descriptors to hw
 * @chan: DMA channel handle
 */
static inline void __ioat1_dma_memcpy_issue_pending(
						struct ioat_dma_chan *ioat_chan)
{
	ioat_chan->pending = 0;
	writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
}

static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

	if (ioat_chan->pending > 0) {
		spin_lock_bh(&ioat_chan->desc_lock);
		__ioat1_dma_memcpy_issue_pending(ioat_chan);
		spin_unlock_bh(&ioat_chan->desc_lock);
	}
}

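/*
 * version 2.0+ engines are notified of new descriptors by writing the
 * updated descriptor count, rather than by an APPEND channel command
 */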
static inline void __ioat2_dma_memcpy_issue_pending(
						struct ioat_dma_chan *ioat_chan)
{
	ioat_chan->pending = 0;
	writew(ioat_chan->dmacount,
	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
}

static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

	if (ioat_chan->pending > 0) {
		spin_lock_bh(&ioat_chan->desc_lock);
		__ioat2_dma_memcpy_issue_pending(ioat_chan);
		spin_unlock_bh(&ioat_chan->desc_lock);
	}
}

/**
 * ioat_dma_chan_reset_part2 - reinit the channel after a reset
 */
static void ioat_dma_chan_reset_part2(struct work_struct *work)
{
	struct ioat_dma_chan *ioat_chan =
		container_of(work, struct ioat_dma_chan, work.work);
	struct ioat_desc_sw *desc;

	spin_lock_bh(&ioat_chan->cleanup_lock);
	spin_lock_bh(&ioat_chan->desc_lock);

	ioat_chan->completion_virt->low = 0;
	ioat_chan->completion_virt->high = 0;
	ioat_chan->pending = 0;

	/*
	 * count the descriptors waiting, and be sure to do it
	 * right for both the CB1 line and the CB2 ring
	 */
	ioat_chan->dmacount = 0;
	if (ioat_chan->used_desc.prev) {
		desc = to_ioat_desc(ioat_chan->used_desc.prev);
		do {
			ioat_chan->dmacount++;
			desc = to_ioat_desc(desc->node.next);
		} while (&desc->node != ioat_chan->used_desc.next);
	}

	/*
	 * write the new starting descriptor address
	 * this puts channel engine into ARMED state
	 */
	desc = to_ioat_desc(ioat_chan->used_desc.prev);
	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
		writel(((u64) desc->async_tx.phys) >> 32,
		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);

		writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
			+ IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
		break;
	case IOAT_VER_2_0:
		writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
		writel(((u64) desc->async_tx.phys) >> 32,
		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);

		/* tell the engine to go with what's left to be done */
		writew(ioat_chan->dmacount,
		       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);

		break;
	}
	dev_err(&ioat_chan->device->pdev->dev,
		"chan%d reset - %d descs waiting, %d total desc\n",
		chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);

	spin_unlock_bh(&ioat_chan->desc_lock);
	spin_unlock_bh(&ioat_chan->cleanup_lock);
}

/**
 * ioat_dma_reset_channel - restart a channel
 * @ioat_chan: IOAT DMA channel handle
 */
static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan)
{
	u32 chansts, chanerr;

	if (!ioat_chan->used_desc.prev)
		return;

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	chansts = (ioat_chan->completion_virt->low
					& IOAT_CHANSTS_DMA_TRANSFER_STATUS);
	if (chanerr) {
		dev_err(&ioat_chan->device->pdev->dev,
			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
			chan_num(ioat_chan), chansts, chanerr);
		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/*
	 * whack it upside the head with a reset
	 * and wait for things to settle out.
	 * force the pending count to a really big negative
	 * to make sure no one forces an issue_pending
	 * while we're waiting.
	 */

	spin_lock_bh(&ioat_chan->desc_lock);
	ioat_chan->pending = INT_MIN;
	writeb(IOAT_CHANCMD_RESET,
	       ioat_chan->reg_base
	       + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
	spin_unlock_bh(&ioat_chan->desc_lock);

	/* schedule the 2nd half instead of sleeping a long time */
	schedule_delayed_work(&ioat_chan->work, RESET_DELAY);
}

/**
 * ioat_dma_chan_watchdog - watch for stuck channels
 */
static void ioat_dma_chan_watchdog(struct work_struct *work)
{
	struct ioatdma_device *device =
		container_of(work, struct ioatdma_device, work.work);
	struct ioat_dma_chan *ioat_chan;
	int i;

	union {
		u64 full;
		struct {
			u32 low;
			u32 high;
		};
	} completion_hw;
	unsigned long compl_desc_addr_hw;

	for (i = 0; i < device->common.chancnt; i++) {
		ioat_chan = ioat_lookup_chan_by_index(device, i);

		if (ioat_chan->device->version == IOAT_VER_1_2
			/* have we started processing anything yet */
		    && ioat_chan->last_completion
			/* have we completed any since last watchdog cycle? */
		    && (ioat_chan->last_completion ==
				ioat_chan->watchdog_completion)
			/* has TCP stuck on one cookie since last watchdog? */
		    && (ioat_chan->watchdog_tcp_cookie ==
				ioat_chan->watchdog_last_tcp_cookie)
		    && (ioat_chan->watchdog_tcp_cookie !=
				ioat_chan->completed_cookie)
			/* is there something in the chain to be processed? */
			/* CB1 chain always has at least the last one processed */
		    && (ioat_chan->used_desc.prev != ioat_chan->used_desc.next)
		    && ioat_chan->pending == 0) {

			/*
			 * Check the CHANSTS register for the completed
			 * descriptor address. If it differs from the
			 * completion writeback, is non-zero, and has
			 * changed since the last watchdog, we can assume
			 * the channel is still working correctly and the
			 * problem is in the completion writeback, so
			 * update the writeback with the actual CHANSTS
			 * value. Otherwise, try resetting the channel.
			 */

			completion_hw.low = readl(ioat_chan->reg_base +
				IOAT_CHANSTS_OFFSET_LOW(ioat_chan->device->version));
			completion_hw.high = readl(ioat_chan->reg_base +
				IOAT_CHANSTS_OFFSET_HIGH(ioat_chan->device->version));
#if (BITS_PER_LONG == 64)
			compl_desc_addr_hw =
				completion_hw.full
				& IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
			compl_desc_addr_hw =
				completion_hw.low & IOAT_LOW_COMPLETION_MASK;
#endif

			if ((compl_desc_addr_hw != 0)
			    && (compl_desc_addr_hw != ioat_chan->watchdog_completion)
			    && (compl_desc_addr_hw != ioat_chan->last_compl_desc_addr_hw)) {
				ioat_chan->last_compl_desc_addr_hw = compl_desc_addr_hw;
				ioat_chan->completion_virt->low = completion_hw.low;
				ioat_chan->completion_virt->high = completion_hw.high;
			} else {
				ioat_dma_reset_channel(ioat_chan);
				ioat_chan->watchdog_completion = 0;
				ioat_chan->last_compl_desc_addr_hw = 0;
			}

		/*
		 * For version 2.0: if there are descriptors yet to be
		 * processed and the last completed address hasn't changed
		 * since the last watchdog, issue the pending descriptors
		 * to push them through if they haven't hit the pending
		 * level; otherwise, try resetting the channel.
		 */
		} else if (ioat_chan->device->version == IOAT_VER_2_0
		    && ioat_chan->used_desc.prev
		    && ioat_chan->last_completion
		    && ioat_chan->last_completion == ioat_chan->watchdog_completion) {

			if (ioat_chan->pending < ioat_pending_level)
				ioat2_dma_memcpy_issue_pending(&ioat_chan->common);
			else {
				ioat_dma_reset_channel(ioat_chan);
				ioat_chan->watchdog_completion = 0;
			}
		} else {
			ioat_chan->last_compl_desc_addr_hw = 0;
			ioat_chan->watchdog_completion
					= ioat_chan->last_completion;
		}

		ioat_chan->watchdog_last_tcp_cookie =
			ioat_chan->watchdog_tcp_cookie;
	}

	schedule_delayed_work(&device->work, WATCHDOG_DELAY);
}

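/**
 * ioat1_tx_submit - queue a prepared copy onto the channel's chain
 * @tx: descriptor returned by ioat1_dma_prep_memcpy
 *
 * Splits the copy into hw descriptors of at most xfercap bytes, links them
 * onto the channel's chain, assigns the next cookie to the last descriptor,
 * and appends to the hw once the pending count reaches ioat_pending_level.
 */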
static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
	struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
	struct ioat_desc_sw *prev, *new;
	struct ioat_dma_descriptor *hw;
	dma_cookie_t cookie;
	LIST_HEAD(new_chain);
	u32 copy;
	size_t len;
	dma_addr_t src, dst;
	unsigned long orig_flags;
	unsigned int desc_count = 0;

	/* src and dest and len are stored in the initial descriptor */
	len = first->len;
	src = first->src;
	dst = first->dst;
	orig_flags = first->async_tx.flags;
	new = first;

	spin_lock_bh(&ioat_chan->desc_lock);
	prev = to_ioat_desc(ioat_chan->used_desc.prev);
	prefetch(prev->hw);
	do {
		copy = min_t(size_t, len, ioat_chan->xfercap);

		async_tx_ack(&new->async_tx);

		hw = new->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dst;
		hw->next = 0;

		/* chain together the physical address list for the HW */
		wmb();
		prev->hw->next = (u64) new->async_tx.phys;

		len -= copy;
		dst += copy;
		src += copy;

		list_add_tail(&new->node, &new_chain);
		desc_count++;
		prev = new;
	} while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan)));

	if (!new) {
		dev_err(&ioat_chan->device->pdev->dev,
			"tx submit failed\n");
		spin_unlock_bh(&ioat_chan->desc_lock);
		return -ENOMEM;
	}

	hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
	if (new->async_tx.callback) {
		hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
		if (first != new) {
			/* move callback to the last desc */
			new->async_tx.callback = first->async_tx.callback;
			new->async_tx.callback_param
					= first->async_tx.callback_param;
			first->async_tx.callback = NULL;
			first->async_tx.callback_param = NULL;
		}
	}

	new->tx_cnt = desc_count;
	new->async_tx.flags = orig_flags; /* client is in control of this ack */

	/* store the original values for use in later cleanup */
	if (new != first) {
		new->src = first->src;
		new->dst = first->dst;
		new->len = first->len;
	}

	/* cookie incr and addition to used_list must be atomic */
	cookie = ioat_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	ioat_chan->common.cookie = new->async_tx.cookie = cookie;

	/* write address into NextDescriptor field of last desc in chain */
	to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
							first->async_tx.phys;
	list_splice_tail(&new_chain, &ioat_chan->used_desc);

	ioat_chan->dmacount += desc_count;
	ioat_chan->pending += desc_count;
	if (ioat_chan->pending >= ioat_pending_level)
		__ioat1_dma_memcpy_issue_pending(ioat_chan);
	spin_unlock_bh(&ioat_chan->desc_lock);

	return cookie;
}

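/**
 * ioat2_tx_submit - fill the descriptor ring and assign a cookie
 * @tx: descriptor returned by ioat2_dma_prep_memcpy
 *
 * Note: ioat_chan->desc_lock was taken in ioat2_dma_prep_memcpy and is
 * still held on entry; it is released before returning.
 */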
static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
	struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
	struct ioat_desc_sw *new;
	struct ioat_dma_descriptor *hw;
	dma_cookie_t cookie;
	u32 copy;
	size_t len;
	dma_addr_t src, dst;
	unsigned long orig_flags;
	unsigned int desc_count = 0;

	/* src and dest and len are stored in the initial descriptor */
	len = first->len;
	src = first->src;
	dst = first->dst;
	orig_flags = first->async_tx.flags;
	new = first;

	/*
	 * ioat_chan->desc_lock is still in force in version 2 path
	 * it gets unlocked at end of this function
	 */
	do {
		copy = min_t(size_t, len, ioat_chan->xfercap);

		async_tx_ack(&new->async_tx);

		hw = new->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dst;

		len -= copy;
		dst += copy;
		src += copy;
		desc_count++;
	} while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));

	if (!new) {
		dev_err(&ioat_chan->device->pdev->dev,
			"tx submit failed\n");
		spin_unlock_bh(&ioat_chan->desc_lock);
		return -ENOMEM;
	}

	hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
	if (new->async_tx.callback) {
		hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
		if (first != new) {
			/* move callback to the last desc */
			new->async_tx.callback = first->async_tx.callback;
			new->async_tx.callback_param
					= first->async_tx.callback_param;
			first->async_tx.callback = NULL;
			first->async_tx.callback_param = NULL;
		}
	}

	new->tx_cnt = desc_count;
	new->async_tx.flags = orig_flags; /* client is in control of this ack */

	/* store the original values for use in later cleanup */
	if (new != first) {
		new->src = first->src;
		new->dst = first->dst;
		new->len = first->len;
	}

	/* cookie incr and addition to used_list must be atomic */
	cookie = ioat_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	ioat_chan->common.cookie = new->async_tx.cookie = cookie;

	ioat_chan->dmacount += desc_count;
	ioat_chan->pending += desc_count;
	if (ioat_chan->pending >= ioat_pending_level)
		__ioat2_dma_memcpy_issue_pending(ioat_chan);
	spin_unlock_bh(&ioat_chan->desc_lock);

	return cookie;
}

/**
 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
 * @ioat_chan: the channel supplying the memory pool for the descriptors
 * @flags: allocation flags
 */
static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
					struct ioat_dma_chan *ioat_chan,
					gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioatdma_device *ioatdma_device;
	dma_addr_t phys;

	ioatdma_device = to_ioatdma_device(ioat_chan->common.device);
	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		desc_sw->async_tx.tx_submit = ioat1_tx_submit;
		break;
	case IOAT_VER_2_0:
	case IOAT_VER_3_0:
		desc_sw->async_tx.tx_submit = ioat2_tx_submit;
		break;
	}
	INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);

	desc_sw->hw = desc;
	desc_sw->async_tx.phys = phys;

	return desc_sw;
}

static int ioat_initial_desc_count = 256;
module_param(ioat_initial_desc_count, int, 0644);
MODULE_PARM_DESC(ioat_initial_desc_count,
		 "initial descriptors per channel (default: 256)");

/**
 * ioat2_dma_massage_chan_desc - link the descriptors into a circle
 * @ioat_chan: the channel to be massaged
 */
static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *desc, *_desc;

	/* setup used_desc */
	ioat_chan->used_desc.next = ioat_chan->free_desc.next;
	ioat_chan->used_desc.prev = NULL;

	/* pull free_desc out of the circle so that every node is a hw
	 * descriptor, but leave it pointing to the list
	 */
	ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next;
	ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev;

	/* circle link the hw descriptors */
	desc = to_ioat_desc(ioat_chan->free_desc.next);
	desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
	list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) {
		desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
	}
}

/**
 * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
 * @chan: the channel to be filled out
 */
static int ioat_dma_alloc_chan_resources(struct dma_chan *chan,
					 struct dma_client *client)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *desc;
	u16 chanctrl;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/* have we already been set up? */
	if (!list_empty(&ioat_chan->free_desc))
		return ioat_chan->desccount;

	/* Setup register to interrupt and write completion status on error */
	chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
		IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
		IOAT_CHANCTRL_ERR_COMPLETION_EN;
	writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(&ioat_chan->device->pdev->dev,
			"CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < ioat_initial_desc_count; i++) {
		desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
		if (!desc) {
			dev_err(&ioat_chan->device->pdev->dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat_chan->desc_lock);
	ioat_chan->desccount = i;
	list_splice(&tmp_list, &ioat_chan->free_desc);
	if (ioat_chan->device->version != IOAT_VER_1_2)
		ioat2_dma_massage_chan_desc(ioat_chan);
	spin_unlock_bh(&ioat_chan->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	ioat_chan->completion_virt =
		pci_pool_alloc(ioat_chan->device->completion_pool,
			       GFP_KERNEL,
			       &ioat_chan->completion_addr);
	memset(ioat_chan->completion_virt, 0,
	       sizeof(*ioat_chan->completion_virt));
	writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) ioat_chan->completion_addr) >> 32,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	tasklet_enable(&ioat_chan->cleanup_task);
	ioat_dma_start_null_desc(ioat_chan); /* give chain to dma device */
	return ioat_chan->desccount;
}

/**
 * ioat_dma_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device);
	struct ioat_desc_sw *desc, *_desc;
	int in_use_descs = 0;

	tasklet_disable(&ioat_chan->cleanup_task);
	ioat_dma_memcpy_cleanup(ioat_chan);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET,
	       ioat_chan->reg_base
	       + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
	mdelay(100);

	spin_lock_bh(&ioat_chan->desc_lock);
	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		list_for_each_entry_safe(desc, _desc,
					 &ioat_chan->used_desc, node) {
			in_use_descs++;
			list_del(&desc->node);
			pci_pool_free(ioatdma_device->dma_pool, desc->hw,
				      desc->async_tx.phys);
			kfree(desc);
		}
		list_for_each_entry_safe(desc, _desc,
					 &ioat_chan->free_desc, node) {
			list_del(&desc->node);
			pci_pool_free(ioatdma_device->dma_pool, desc->hw,
				      desc->async_tx.phys);
			kfree(desc);
		}
		break;
	case IOAT_VER_2_0:
	case IOAT_VER_3_0:
		list_for_each_entry_safe(desc, _desc,
					 ioat_chan->free_desc.next, node) {
			list_del(&desc->node);
			pci_pool_free(ioatdma_device->dma_pool, desc->hw,
				      desc->async_tx.phys);
			kfree(desc);
		}
		desc = to_ioat_desc(ioat_chan->free_desc.next);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->async_tx.phys);
		kfree(desc);
		INIT_LIST_HEAD(&ioat_chan->free_desc);
		INIT_LIST_HEAD(&ioat_chan->used_desc);
		break;
	}
	spin_unlock_bh(&ioat_chan->desc_lock);

	pci_pool_free(ioatdma_device->completion_pool,
		      ioat_chan->completion_virt,
		      ioat_chan->completion_addr);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		dev_err(&ioat_chan->device->pdev->dev,
			"Freeing %d in use descriptors!\n",
			in_use_descs - 1);

	ioat_chan->last_completion = ioat_chan->completion_addr = 0;
	ioat_chan->pending = 0;
	ioat_chan->dmacount = 0;
	ioat_chan->watchdog_completion = 0;
	ioat_chan->last_compl_desc_addr_hw = 0;
	ioat_chan->watchdog_tcp_cookie =
		ioat_chan->watchdog_last_tcp_cookie = 0;
}

/**
 * ioat1_dma_get_next_descriptor - return the next available descriptor
 * @ioat_chan: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held. Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *new;

	if (!list_empty(&ioat_chan->free_desc)) {
		new = to_ioat_desc(ioat_chan->free_desc.next);
		list_del(&new->node);
	} else {
		/* try to get another desc */
		new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
		if (!new) {
			dev_err(&ioat_chan->device->pdev->dev,
				"alloc failed\n");
			return NULL;
		}
	}

	prefetch(new->hw);
	return new;
}

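/**
 * ioat2_dma_get_next_descriptor - return the next free slot in the ring
 * @ioat_chan: IOAT DMA channel handle
 *
 * Ring variant for version 2.0/3.0 hardware; inserts a noop descriptor and
 * grows the ring when only one free slot remains. Must be called with the
 * channel's desc_lock held.
 */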
static struct ioat_desc_sw *
ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *new;

	/*
	 * used.prev points to where to start processing
	 * used.next points to next free descriptor
	 * if used.prev == NULL, there are none waiting to be processed
	 * if used.next == used.prev.prev, there is only one free descriptor,
	 *      and we need to use it as a noop descriptor before
	 *      linking in a new set of descriptors, since the device
	 *      has probably already read the pointer to it
	 */
	if (ioat_chan->used_desc.prev &&
	    ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {

		struct ioat_desc_sw *desc;
		struct ioat_desc_sw *noop_desc;
		int i;

		/* set up the noop descriptor */
		noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
		/* set size to non-zero value (channel returns error when size is 0) */
		noop_desc->hw->size = NULL_DESC_BUFFER_SIZE;
		noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
		noop_desc->hw->src_addr = 0;
		noop_desc->hw->dst_addr = 0;

		ioat_chan->used_desc.next = ioat_chan->used_desc.next->next;
		ioat_chan->pending++;
		ioat_chan->dmacount++;

		/* try to get a few more descriptors */
		for (i = 16; i; i--) {
			desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
			if (!desc) {
				dev_err(&ioat_chan->device->pdev->dev,
					"alloc failed\n");
				break;
			}
			list_add_tail(&desc->node, ioat_chan->used_desc.next);

			desc->hw->next
				= to_ioat_desc(desc->node.next)->async_tx.phys;
			to_ioat_desc(desc->node.prev)->hw->next
				= desc->async_tx.phys;
			ioat_chan->desccount++;
		}

		ioat_chan->used_desc.next = noop_desc->node.next;
	}
	new = to_ioat_desc(ioat_chan->used_desc.next);
	prefetch(new);
	ioat_chan->used_desc.next = new->node.next;

	if (ioat_chan->used_desc.prev == NULL)
		ioat_chan->used_desc.prev = &new->node;

	prefetch(new->hw);
	return new;
}

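/* dispatch to the version-specific get_next_descriptor implementation */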
static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
						struct ioat_dma_chan *ioat_chan)
{
	if (!ioat_chan)
		return NULL;

	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		return ioat1_dma_get_next_descriptor(ioat_chan);
	case IOAT_VER_2_0:
	case IOAT_VER_3_0:
		return ioat2_dma_get_next_descriptor(ioat_chan);
	}
	return NULL;
}

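/**
 * ioat1_dma_prep_memcpy - build a sw descriptor for an async memcpy
 * @chan: DMA channel handle
 * @dma_dest: mapped destination address
 * @dma_src: mapped source address
 * @len: length of the copy; may exceed xfercap, in which case it is split
 *	into multiple hw descriptors at tx_submit time
 * @flags: dmaengine descriptor flags
 */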
static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
						struct dma_chan *chan,
						dma_addr_t dma_dest,
						dma_addr_t dma_src,
						size_t len,
						unsigned long flags)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *new;

	spin_lock_bh(&ioat_chan->desc_lock);
	new = ioat_dma_get_next_descriptor(ioat_chan);
	spin_unlock_bh(&ioat_chan->desc_lock);

	if (new) {
		new->len = len;
		new->dst = dma_dest;
		new->src = dma_src;
		new->async_tx.flags = flags;
		return &new->async_tx;
	} else {
		dev_err(&ioat_chan->device->pdev->dev,
			"chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
			chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
		return NULL;
	}
}

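/**
 * ioat2_dma_prep_memcpy - build a sw descriptor for an async memcpy
 * @chan: DMA channel handle
 * @dma_dest: mapped destination address
 * @dma_src: mapped source address
 * @len: length of the copy
 * @flags: dmaengine descriptor flags
 *
 * On success, returns with desc_lock held; ioat2_tx_submit drops it.
 */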
static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
						struct dma_chan *chan,
						dma_addr_t dma_dest,
						dma_addr_t dma_src,
						size_t len,
						unsigned long flags)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *new;

	spin_lock_bh(&ioat_chan->desc_lock);
	new = ioat2_dma_get_next_descriptor(ioat_chan);

	/*
	 * leave ioat_chan->desc_lock set in ioat 2 path
	 * it will get unlocked at end of tx_submit
	 */

	if (new) {
		new->len = len;
		new->dst = dma_dest;
		new->src = dma_src;
		new->async_tx.flags = flags;
		return &new->async_tx;
	} else {
		spin_unlock_bh(&ioat_chan->desc_lock);
		dev_err(&ioat_chan->device->pdev->dev,
			"chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
			chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
		return NULL;
	}
}

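/*
 * ioat_dma_cleanup_tasklet - bottom half scheduled by the interrupt
 * handlers; reaps finished descriptors, then writes the CHANCTRL
 * interrupt bit, which (despite the INT_DISABLE name) re-arms
 * interrupt generation for this channel
 */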
static void ioat_dma_cleanup_tasklet(unsigned long data)
{
	struct ioat_dma_chan *chan = (void *)data;
	ioat_dma_memcpy_cleanup(chan);
	writew(IOAT_CHANCTRL_INT_DISABLE,
	       chan->reg_base + IOAT_CHANCTRL_OFFSET);
}

static void
ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc)
{
	/*
	 * yes we are unmapping both _page and _single
	 * alloc'd regions with unmap_page. Is this
	 * *really* that bad?
	 */
	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP))
		pci_unmap_page(ioat_chan->device->pdev,
			       pci_unmap_addr(desc, dst),
			       pci_unmap_len(desc, len),
			       PCI_DMA_FROMDEVICE);

	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP))
		pci_unmap_page(ioat_chan->device->pdev,
			       pci_unmap_addr(desc, src),
			       pci_unmap_len(desc, len),
			       PCI_DMA_TODEVICE);
}

/**
 * ioat_dma_memcpy_cleanup - clean up finished descriptors
 * @ioat_chan: ioat channel to be cleaned up
 */
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
{
	unsigned long phys_complete;
	struct ioat_desc_sw *desc, *_desc;
	dma_cookie_t cookie = 0;
	unsigned long desc_phys;
	struct ioat_desc_sw *latest_desc;

	prefetch(ioat_chan->completion_virt);

	if (!spin_trylock_bh(&ioat_chan->cleanup_lock))
		return;

	/* The completion writeback can happen at any time,
	   so reads by the driver need to be atomic operations
	   The descriptor physical addresses are limited to 32-bits
	   when the CPU can only do a 32-bit mov */

#if (BITS_PER_LONG == 64)
	phys_complete =
		ioat_chan->completion_virt->full
		& IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
	phys_complete =
		ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
#endif

	if ((ioat_chan->completion_virt->full
		& IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
				IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
		dev_err(&ioat_chan->device->pdev->dev,
			"Channel halted, chanerr = %x\n",
			readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));

		/* TODO do something to salvage the situation */
	}

	if (phys_complete == ioat_chan->last_completion) {
		spin_unlock_bh(&ioat_chan->cleanup_lock);
		/*
		 * perhaps we're stuck so hard that the watchdog can't go off?
		 * try to catch it after 2 seconds
		 */
		if (ioat_chan->device->version != IOAT_VER_3_0) {
			if (time_after(jiffies,
				       ioat_chan->last_completion_time + HZ*WATCHDOG_DELAY)) {
				ioat_dma_chan_watchdog(&(ioat_chan->device->work.work));
				ioat_chan->last_completion_time = jiffies;
			}
		}
		return;
	}
	ioat_chan->last_completion_time = jiffies;

	cookie = 0;
	if (!spin_trylock_bh(&ioat_chan->desc_lock)) {
		spin_unlock_bh(&ioat_chan->cleanup_lock);
		return;
	}

	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		list_for_each_entry_safe(desc, _desc,
					 &ioat_chan->used_desc, node) {

			/*
			 * Incoming DMA requests may use multiple descriptors,
			 * due to exceeding xfercap, perhaps. If so, only the
			 * last one will have a cookie, and require unmapping.
			 */
			if (desc->async_tx.cookie) {
				cookie = desc->async_tx.cookie;
				ioat_dma_unmap(ioat_chan, desc);
				if (desc->async_tx.callback) {
					desc->async_tx.callback(desc->async_tx.callback_param);
					desc->async_tx.callback = NULL;
				}
			}

			if (desc->async_tx.phys != phys_complete) {
				/*
				 * a completed entry, but not the last, so clean
				 * up if the client is done with the descriptor
				 */
				if (async_tx_test_ack(&desc->async_tx)) {
					list_del(&desc->node);
					list_add_tail(&desc->node,
						      &ioat_chan->free_desc);
				} else
					desc->async_tx.cookie = 0;
			} else {
				/*
				 * last used desc. Do not remove, so we can
				 * append from it, but don't look at it next
				 * time, either
				 */
				desc->async_tx.cookie = 0;

				/* TODO check status bits? */
				break;
			}
		}
		break;
	case IOAT_VER_2_0:
	case IOAT_VER_3_0:
		/* has some other thread already cleaned up? */
		if (ioat_chan->used_desc.prev == NULL)
			break;

		/* work backwards to find latest finished desc */
		desc = to_ioat_desc(ioat_chan->used_desc.next);
		latest_desc = NULL;
		do {
			desc = to_ioat_desc(desc->node.prev);
			desc_phys = (unsigned long)desc->async_tx.phys
				       & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
			if (desc_phys == phys_complete) {
				latest_desc = desc;
				break;
			}
		} while (&desc->node != ioat_chan->used_desc.prev);

		if (latest_desc != NULL) {

			/* work forwards to clear finished descriptors */
			for (desc = to_ioat_desc(ioat_chan->used_desc.prev);
			     &desc->node != latest_desc->node.next &&
			     &desc->node != ioat_chan->used_desc.next;
			     desc = to_ioat_desc(desc->node.next)) {
				if (desc->async_tx.cookie) {
					cookie = desc->async_tx.cookie;
					desc->async_tx.cookie = 0;
					ioat_dma_unmap(ioat_chan, desc);
					if (desc->async_tx.callback) {
						desc->async_tx.callback(desc->async_tx.callback_param);
						desc->async_tx.callback = NULL;
					}
				}
			}

			/* move used.prev up beyond those that are finished */
			if (&desc->node == ioat_chan->used_desc.next)
				ioat_chan->used_desc.prev = NULL;
			else
				ioat_chan->used_desc.prev = &desc->node;
		}
		break;
	}

	spin_unlock_bh(&ioat_chan->desc_lock);

	ioat_chan->last_completion = phys_complete;
	if (cookie != 0)
		ioat_chan->completed_cookie = cookie;

	spin_unlock_bh(&ioat_chan->cleanup_lock);
}

/**
 * ioat_dma_is_complete - poll the status of an IOAT DMA transaction
 * @chan: IOAT DMA channel handle
 * @cookie: DMA transaction identifier
 * @done: if not %NULL, updated with last completed transaction
 * @used: if not %NULL, updated with last used transaction
 */
static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    dma_cookie_t *done,
					    dma_cookie_t *used)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;

	last_used = chan->cookie;
	last_complete = ioat_chan->completed_cookie;
	ioat_chan->watchdog_tcp_cookie = cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS)
		return ret;

	ioat_dma_memcpy_cleanup(ioat_chan);

	last_used = chan->cookie;
	last_complete = ioat_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}

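/**
 * ioat_dma_start_null_desc - queue a null descriptor and start the channel,
 * giving the hw a valid chain address to fetch from
 * @ioat_chan: IOAT DMA channel handle
 */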
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *desc;

	spin_lock_bh(&ioat_chan->desc_lock);

	desc = ioat_dma_get_next_descriptor(ioat_chan);

	if (!desc) {
		dev_err(&ioat_chan->device->pdev->dev,
			"Unable to start null desc - get next desc failed\n");
		spin_unlock_bh(&ioat_chan->desc_lock);
		return;
	}

	desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL
				| IOAT_DMA_DESCRIPTOR_CTL_INT_GN
				| IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
	/* set size to non-zero value (channel returns error when size is 0) */
	desc->hw->size = NULL_DESC_BUFFER_SIZE;
	desc->hw->src_addr = 0;
	desc->hw->dst_addr = 0;
	async_tx_ack(&desc->async_tx);
	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		desc->hw->next = 0;
		list_add_tail(&desc->node, &ioat_chan->used_desc);

		writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
		writel(((u64) desc->async_tx.phys) >> 32,
		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);

		writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
			+ IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
		break;
	case IOAT_VER_2_0:
	case IOAT_VER_3_0:
		writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
		writel(((u64) desc->async_tx.phys) >> 32,
		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);

		ioat_chan->dmacount++;
		__ioat2_dma_memcpy_issue_pending(ioat_chan);
		break;
	}
	spin_unlock_bh(&ioat_chan->desc_lock);
}

/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void ioat_dma_test_callback(void *dma_async_param)
{
	printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n",
	       dma_async_param);
}

/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @device: device to be tested
 */
static int ioat_dma_self_test(struct ioatdma_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (device->common.device_alloc_chan_resources(dma_chan, NULL) < 1) {
		dev_err(&device->pdev->dev,
			"selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
				 DMA_TO_DEVICE);
	dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
				  DMA_FROM_DEVICE);
	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
						   IOAT_TEST_SIZE, 0);
	if (!tx) {
		dev_err(&device->pdev->dev,
			"Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = (void *)0x8086;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(&device->pdev->dev,
			"Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	device->common.device_issue_pending(dma_chan);
	msleep(1);

	if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
					!= DMA_SUCCESS) {
		dev_err(&device->pdev->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(&device->pdev->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	device->common.device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), "
		 "msix-single-vector, msi, intx");

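/*
 * Interrupt setup falls back in order: msix -> msix-single-vector ->
 * msi -> intx, starting at whichever style the module parameter names.
 */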
/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 */
static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
	struct ioat_dma_chan *ioat_chan;
	int err, i, j, msixcnt;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
		goto msix_single_vector;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(&device->pdev->dev, "invalid ioat_interrupt_style %s\n",
		ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = device->common.chancnt;
	for (i = 0; i < msixcnt; i++)
		device->msix_entries[i].entry = i;

	err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt);
	if (err < 0)
		goto msi;
	if (err > 0)
		goto msix_single_vector;

	for (i = 0; i < msixcnt; i++) {
		ioat_chan = ioat_lookup_chan_by_index(device, i);
		err = request_irq(device->msix_entries[i].vector,
				  ioat_dma_do_interrupt_msix,
				  0, "ioat-msix", ioat_chan);
		if (err) {
			for (j = 0; j < i; j++) {
				ioat_chan =
					ioat_lookup_chan_by_index(device, j);
				free_irq(device->msix_entries[j].vector,
					 ioat_chan);
			}
			goto msix_single_vector;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	device->irq_mode = msix_multi_vector;
	goto done;

msix_single_vector:
	device->msix_entries[0].entry = 0;
	err = pci_enable_msix(device->pdev, device->msix_entries, 1);
	if (err)
		goto msi;

	err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt,
			  0, "ioat-msix", device);
	if (err) {
		pci_disable_msix(device->pdev);
		goto msi;
	}
	device->irq_mode = msix_single_vector;
	goto done;

msi:
	err = pci_enable_msi(device->pdev);
	if (err)
		goto intx;

	err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
			  0, "ioat-msi", device);
	if (err) {
		pci_disable_msi(device->pdev);
		goto intx;
	}
	/*
	 * CB 1.2 devices need a bit set in configuration space to enable MSI
	 */
	if (device->version == IOAT_VER_1_2) {
		u32 dmactrl;
		pci_read_config_dword(device->pdev,
				      IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
		pci_write_config_dword(device->pdev,
				       IOAT_PCI_DMACTRL_OFFSET, dmactrl);
	}
	device->irq_mode = msi;
	goto done;

intx:
	err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
			  IRQF_SHARED, "ioat-intx", device);
	if (err)
		goto err_no_irq;
	device->irq_mode = intx;

done:
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
	dev_err(&device->pdev->dev, "no usable interrupts\n");
	device->irq_mode = none;
	return -1;
}

/**
 * ioat_dma_remove_interrupts - remove whatever interrupts were set
 * @device: ioat device
 */
static void ioat_dma_remove_interrupts(struct ioatdma_device *device)
{
	struct ioat_dma_chan *ioat_chan;
	int i;

	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);

	switch (device->irq_mode) {
	case msix_multi_vector:
		for (i = 0; i < device->common.chancnt; i++) {
			ioat_chan = ioat_lookup_chan_by_index(device, i);
			free_irq(device->msix_entries[i].vector, ioat_chan);
		}
		pci_disable_msix(device->pdev);
		break;
	case msix_single_vector:
		free_irq(device->msix_entries[0].vector, device);
		pci_disable_msix(device->pdev);
		break;
	case msi:
		free_irq(device->pdev->irq, device);
		pci_disable_msi(device->pdev);
		break;
	case intx:
		free_irq(device->pdev->irq, device);
		break;
	case none:
		dev_warn(&device->pdev->dev,
			 "call to %s without interrupts setup\n", __func__);
	}
	device->irq_mode = none;
}

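/**
 * ioat_dma_probe - allocate the device, enumerate channels, run a
 * self-test, and register with the dmaengine core
 * @pdev: PCI device
 * @iobase: ioremapped MMIO register base
 */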
struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
				      void __iomem *iobase)
{
	int err;
	struct ioatdma_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		err = -ENOMEM;
		goto err_kzalloc;
	}
	device->pdev = pdev;
	device->reg_base = iobase;
	device->version = readb(device->reg_base + IOAT_VER_OFFSET);

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);
	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	INIT_LIST_HEAD(&device->common.channels);
	ioat_dma_enumerate_channels(device);

	device->common.device_alloc_chan_resources =
						ioat_dma_alloc_chan_resources;
	device->common.device_free_chan_resources =
						ioat_dma_free_chan_resources;
	device->common.dev = &pdev->dev;

	dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
	device->common.device_is_tx_complete = ioat_dma_is_complete;
	switch (device->version) {
	case IOAT_VER_1_2:
		device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
		device->common.device_issue_pending =
						ioat1_dma_memcpy_issue_pending;
		break;
	case IOAT_VER_2_0:
	case IOAT_VER_3_0:
		device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
		device->common.device_issue_pending =
						ioat2_dma_memcpy_issue_pending;
		break;
	}

	dev_err(&device->pdev->dev,
		"Intel(R) I/OAT DMA Engine found,"
		" %d channels, device version 0x%02x, driver version %s\n",
		device->common.chancnt, device->version, IOAT_DMA_VERSION);

	err = ioat_dma_setup_interrupts(device);
	if (err)
		goto err_setup_interrupts;

	err = ioat_dma_self_test(device);
	if (err)
		goto err_self_test;

	ioat_set_tcp_copy_break(device);

	dma_async_device_register(&device->common);

	if (device->version != IOAT_VER_3_0) {
		INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
		schedule_delayed_work(&device->work,
				      WATCHDOG_DELAY);
	}

	return device;

err_self_test:
	ioat_dma_remove_interrupts(device);
err_setup_interrupts:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	kfree(device);
err_kzalloc:
	dev_err(&pdev->dev,
		"Intel(R) I/OAT DMA Engine initialization failed\n");
	return NULL;
}

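/**
 * ioat_dma_remove - unwind ioat_dma_probe: interrupts, dmaengine
 * registration, pools, MMIO mapping, and per-channel state
 * @device: ioat device returned by ioat_dma_probe
 */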
void ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_chan *chan, *_chan;
	struct ioat_dma_chan *ioat_chan;

	ioat_dma_remove_interrupts(device);

	dma_async_device_unregister(&device->common);

	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);

	iounmap(device->reg_base);
	pci_release_regions(device->pdev);
	pci_disable_device(device->pdev);

	if (device->version != IOAT_VER_3_0) {
		cancel_delayed_work(&device->work);
	}

	list_for_each_entry_safe(chan, _chan,
				 &device->common.channels, device_node) {
		ioat_chan = to_ioat_chan(chan);
		list_del(&chan->device_node);
		kfree(ioat_chan);
	}
	kfree(device);
}