/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");

/* internal functions */
static void ioat1_cleanup(struct ioat_dma_chan *ioat);
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat);

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioat_chan_common *chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
		chan = ioat_chan_by_index(instance, bit);
		tasklet_schedule(&chan->cleanup_task);
	}

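	/* write the saved control value back, which appears to re-arm
	 * interrupt generation (the IRQ_NONE path above does the same) */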
	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioat_chan_common *chan = data;

	tasklet_schedule(&chan->cleanup_task);

	return IRQ_HANDLED;
}

/* common channel initialization */
void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c = &chan->common;
	unsigned long data = (unsigned long) c;

	chan->device = device;
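	/* channel registers live in consecutive 0x80-byte windows after
	 * the device-global register block */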
	chan->reg_base = device->reg_base + (0x80 * (idx + 1));
	spin_lock_init(&chan->cleanup_lock);
	chan->common.device = dma;
	list_add_tail(&chan->common.device_node, &dma->channels);
	device->idx[idx] = chan;
	init_timer(&chan->timer);
	chan->timer.function = device->timer_fn;
	chan->timer.data = data;
	tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
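	/* keep cleanup work off until ->alloc_chan_resources() re-enables it */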
	tasklet_disable(&chan->cleanup_task);
}

/**
 * ioat1_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat1_enumerate_channels(struct ioatdma_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(device->idx));
		dma->chancnt = ARRAY_SIZE(device->idx);
	}
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_scale &= 0x1f; /* bits [4:0] valid */
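	/* the scale encodes a power of two; a scale of 0 yields all ones,
	 * i.e. effectively no transfer cap */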
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);

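/* if the i7300 idle driver will claim an ioat channel for itself,
 * leave the last one un-enumerated for it */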
#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i);
		ioat->xfercap = xfercap;
		spin_lock_init(&ioat->desc_lock);
		INIT_LIST_HEAD(&ioat->free_desc);
		INIT_LIST_HEAD(&ioat->used_desc);
	}
	dma->chancnt = i;
	return i;
}

/**
 * __ioat1_dma_memcpy_issue_pending - push potentially unrecognized appended
 * descriptors to hw
 * @ioat: IOAT DMA channel handle
 */
static inline void
__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
{
	void __iomem *reg_base = ioat->base.reg_base;

	dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
		__func__, ioat->pending);
	ioat->pending = 0;
	writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
}

static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(chan);

	if (ioat->pending > 0) {
		spin_lock_bh(&ioat->desc_lock);
		__ioat1_dma_memcpy_issue_pending(ioat);
		spin_unlock_bh(&ioat->desc_lock);
	}
}

/**
 * ioat1_reset_channel - restart a channel
 * @ioat: IOAT DMA channel handle
 */
static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	void __iomem *reg_base = chan->reg_base;
	u32 chansts, chanerr;

	dev_warn(to_dev(chan), "reset\n");
	chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
	chansts = *chan->completion & IOAT_CHANSTS_STATUS;
	if (chanerr) {
		dev_err(to_dev(chan),
			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
			chan_num(chan), chansts, chanerr);
		writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
	}

	/*
	 * whack it upside the head with a reset
	 * and wait for things to settle out.
	 * force the pending count to a really big negative
	 * to make sure no one forces an issue_pending
	 * while we're waiting.
	 */

	ioat->pending = INT_MIN;
	writeb(IOAT_CHANCMD_RESET,
	       reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	set_bit(IOAT_RESET_PENDING, &chan->state);
	mod_timer(&chan->timer, jiffies + RESET_DELAY);
}

static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *first;
	struct ioat_desc_sw *chain_tail;
	dma_cookie_t cookie;

	spin_lock_bh(&ioat->desc_lock);
	/* cookie incr and addition to used_list must be atomic */
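	/* cookies stay positive: 0 means "not yet submitted" and negative
	 * values read as error codes, hence the wrap back to 1 */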
	cookie = c->cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	c->cookie = cookie;
	tx->cookie = cookie;
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	/* write address into NextDescriptor field of last desc in chain */
	first = to_ioat_desc(desc->tx_list.next);
	chain_tail = to_ioat_desc(ioat->used_desc.prev);
	/* make descriptor updates globally visible before chaining */
	wmb();
	chain_tail->hw->next = first->txd.phys;
	list_splice_tail_init(&desc->tx_list, &ioat->used_desc);
	dump_desc_dbg(ioat, chain_tail);
	dump_desc_dbg(ioat, first);

	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	ioat->active += desc->hw->tx_cnt;
	ioat->pending += desc->hw->tx_cnt;
	if (ioat->pending >= ioat_pending_level)
		__ioat1_dma_memcpy_issue_pending(ioat);
	spin_unlock_bh(&ioat->desc_lock);

	return cookie;
}

/**
 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
 * @ioat: the channel supplying the memory pool for the descriptors
 * @flags: allocation flags
 */
static struct ioat_desc_sw *
ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioatdma_device *ioatdma_device;
	dma_addr_t phys;

	ioatdma_device = ioat->base.device;
	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));

	INIT_LIST_HEAD(&desc_sw->tx_list);
	dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
	desc_sw->txd.tx_submit = ioat1_tx_submit;
	desc_sw->hw = desc;
	desc_sw->txd.phys = phys;
	set_desc_id(desc_sw, -1);

	return desc_sw;
}

static int ioat_initial_desc_count = 256;
module_param(ioat_initial_desc_count, int, 0644);
MODULE_PARM_DESC(ioat_initial_desc_count,
		 "ioat1: initial descriptors per channel (default: 256)");

/**
 * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors
 * @c: the channel to be filled out
 */
static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/* have we already been set up? */
	if (!list_empty(&ioat->free_desc))
		return ioat->desccount;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < ioat_initial_desc_count; i++) {
		desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL);
		if (!desc) {
			dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
			break;
		}
		set_desc_id(desc, i);
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat->desc_lock);
	ioat->desccount = i;
	list_splice(&tmp_list, &ioat->free_desc);
	spin_unlock_bh(&ioat->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion = pci_pool_alloc(chan->device->completion_pool,
					  GFP_KERNEL, &chan->completion_dma);
	memset(chan->completion, 0, sizeof(*chan->completion));
	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_dma) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	tasklet_enable(&chan->cleanup_task);
	ioat1_dma_start_null_desc(ioat); /* give chain to dma device */
	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
		__func__, ioat->desccount);
	return ioat->desccount;
}

/**
 * ioat1_dma_free_chan_resources - release all the descriptors
 * @c: the channel to be cleaned
 */
static void ioat1_dma_free_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *ioatdma_device = chan->device;
	struct ioat_desc_sw *desc, *_desc;
	int in_use_descs = 0;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (ioat->desccount == 0)
		return;

	tasklet_disable(&chan->cleanup_task);
	del_timer_sync(&chan->timer);
	ioat1_cleanup(ioat);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET,
	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	mdelay(100);

	spin_lock_bh(&ioat->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
		dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
			__func__, desc_id(desc));
		dump_desc_dbg(ioat, desc);
		in_use_descs++;
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	list_for_each_entry_safe(desc, _desc,
				 &ioat->free_desc, node) {
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	spin_unlock_bh(&ioat->desc_lock);

	pci_pool_free(ioatdma_device->completion_pool,
		      chan->completion,
		      chan->completion_dma);

	/* one is ok since we left it there on purpose */
	if (in_use_descs > 1)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			in_use_descs - 1);

	chan->last_completion = 0;
	chan->completion_dma = 0;
	ioat->pending = 0;
	ioat->desccount = 0;
}

/**
 * ioat1_dma_get_next_descriptor - return the next available descriptor
 * @ioat: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held. Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
{
	struct ioat_desc_sw *new;

	if (!list_empty(&ioat->free_desc)) {
		new = to_ioat_desc(ioat->free_desc.next);
		list_del(&new->node);
	} else {
		/* try to get another desc */
		new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
		if (!new) {
			dev_err(to_dev(&ioat->base), "alloc failed\n");
			return NULL;
		}
	}
	dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
		__func__, desc_id(new));
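	/* warm the cache with the hw descriptor the caller is about to fill in */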
	prefetch(new->hw);
	return new;
}

static struct dma_async_tx_descriptor *
ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
		      dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc;
	size_t copy;
	LIST_HEAD(chain);
	dma_addr_t src = dma_src;
	dma_addr_t dest = dma_dest;
	size_t total_len = len;
	struct ioat_dma_descriptor *hw = NULL;
	int tx_cnt = 0;

	spin_lock_bh(&ioat->desc_lock);
	desc = ioat1_dma_get_next_descriptor(ioat);
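	/* carve the copy into xfercap-sized chunks, one hw descriptor per chunk */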
	do {
		if (!desc)
			break;

		tx_cnt++;
		copy = min_t(size_t, len, ioat->xfercap);

		hw = desc->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dest;

		list_add_tail(&desc->node, &chain);

		len -= copy;
		dest += copy;
		src += copy;
		if (len) {
			struct ioat_desc_sw *next;

			async_tx_ack(&desc->txd);
			next = ioat1_dma_get_next_descriptor(ioat);
			hw->next = next ? next->txd.phys : 0;
			dump_desc_dbg(ioat, desc);
			desc = next;
		} else
			hw->next = 0;
	} while (len);

	if (!desc) {
		struct ioat_chan_common *chan = &ioat->base;

		dev_err(to_dev(chan),
			"chan%d - get_next_desc failed\n", chan_num(chan));
		list_splice(&chain, &ioat->free_desc);
		spin_unlock_bh(&ioat->desc_lock);
		return NULL;
	}
	spin_unlock_bh(&ioat->desc_lock);

	desc->txd.flags = flags;
	desc->len = total_len;
	list_splice(&chain, &desc->tx_list);
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->tx_cnt = tx_cnt;
	dump_desc_dbg(ioat, desc);

	return &desc->txd;
}

static void ioat1_cleanup_event(unsigned long data)
{
	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);

	ioat1_cleanup(ioat);
	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
		    size_t len, struct ioat_dma_descriptor *hw)
{
	struct pci_dev *pdev = chan->device->pdev;
	size_t offset = len - hw->size;
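	/* hw is the final descriptor of the chain; offset rewinds from its
	 * last chunk back to the start of the buffer */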

	if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
		ioat_unmap(pdev, hw->dst_addr - offset, len,
			   PCI_DMA_FROMDEVICE, flags, 1);

	if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP))
		ioat_unmap(pdev, hw->src_addr - offset, len,
			   PCI_DMA_TODEVICE, flags, 0);
}

unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
{
	unsigned long phys_complete;
	u64 completion;

	completion = *chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	if (is_ioat_halted(completion)) {
		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
			chanerr);

		/* TODO do something to salvage the situation */
	}

	return phys_complete;
}

bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
			   unsigned long *phys_complete)
{
	*phys_complete = ioat_get_current_completion(chan);
	if (*phys_complete == chan->last_completion)
		return false;
	clear_bit(IOAT_COMPLETION_ACK, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}

static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct list_head *_desc, *n;
	struct dma_async_tx_descriptor *tx;

	dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n",
		__func__, phys_complete);
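	/* walk the in-flight list in submission order, reaping everything
	 * up to the descriptor the hardware last reported complete */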
	list_for_each_safe(_desc, n, &ioat->used_desc) {
		struct ioat_desc_sw *desc;

		prefetch(n);
		desc = list_entry(_desc, typeof(*desc), node);
		tx = &desc->txd;
		/*
		 * Incoming DMA requests may use multiple descriptors,
		 * due to exceeding xfercap, perhaps. If so, only the
		 * last one will have a cookie, and require unmapping.
		 */
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			chan->completed_cookie = tx->cookie;
			tx->cookie = 0;
			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
			ioat->active -= desc->hw->tx_cnt;
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys != phys_complete) {
			/*
			 * a completed entry, but not the last, so clean
			 * up if the client is done with the descriptor
			 */
			if (async_tx_test_ack(tx))
				list_move_tail(&desc->node, &ioat->free_desc);
		} else {
			/*
			 * last used desc. Do not remove, so we can
			 * append from it.
			 */

			/* if nothing else is pending, cancel the
			 * completion timeout
			 */
			if (n == &ioat->used_desc) {
				dev_dbg(to_dev(chan),
					"%s cancel completion timeout\n",
					__func__);
				clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
			}

			/* TODO check status bits? */
			break;
		}
	}

	chan->last_completion = phys_complete;
}

/**
 * ioat1_cleanup - clean up finished descriptors
 * @ioat: ioat channel to be cleaned up
 *
 * To prevent lock contention we defer cleanup when the locks are
 * contended with a terminal timeout that forces cleanup and catches
 * completion notification errors.
 */
static void ioat1_cleanup(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;

	prefetch(chan->completion);

	if (!spin_trylock_bh(&chan->cleanup_lock))
		return;

	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	if (!spin_trylock_bh(&ioat->desc_lock)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	__cleanup(ioat, phys_complete);

	spin_unlock_bh(&ioat->desc_lock);
	spin_unlock_bh(&chan->cleanup_lock);
}

static void ioat1_timer_event(unsigned long data)
{
	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;

	dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);

	spin_lock_bh(&chan->cleanup_lock);
	if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) {
		struct ioat_desc_sw *desc;

		spin_lock_bh(&ioat->desc_lock);

		/* restart active descriptors */
		desc = to_ioat_desc(ioat->used_desc.prev);
		ioat_set_chainaddr(ioat, desc->txd.phys);
		ioat_start(chan);

		ioat->pending = 0;
		set_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		spin_unlock_bh(&ioat->desc_lock);
	} else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
		unsigned long phys_complete;

		spin_lock_bh(&ioat->desc_lock);
		/* if we haven't made progress and we have already
		 * acknowledged a pending completion once, then be more
		 * forceful with a restart
		 */
		if (ioat_cleanup_preamble(chan, &phys_complete))
			__cleanup(ioat, phys_complete);
		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
			ioat1_reset_channel(ioat);
		else {
			u64 status = ioat_chansts(chan);

			/* manually update the last completion address */
			if (ioat_chansts_to_addr(status) != 0)
				*chan->completion = status;

			set_bit(IOAT_COMPLETION_ACK, &chan->state);
			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		}
		spin_unlock_bh(&ioat->desc_lock);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}

enum dma_status
ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie,
		     dma_cookie_t *done, dma_cookie_t *used)
{
	struct ioat_chan_common *chan = to_chan_common(c);
	struct ioatdma_device *device = chan->device;

	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
		return DMA_SUCCESS;

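	/* not done yet: kick cleanup to reap finished descriptors, then re-check */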
	device->cleanup_fn((unsigned long) c);

	return ioat_is_complete(c, cookie, done, used);
}

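/* seed the channel with a no-op descriptor so later submissions have a
 * chain tail to append to */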
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	struct ioat_dma_descriptor *hw;

	spin_lock_bh(&ioat->desc_lock);

	desc = ioat1_dma_get_next_descriptor(ioat);

	if (!desc) {
		dev_err(to_dev(chan),
			"Unable to start null desc - get next desc failed\n");
		spin_unlock_bh(&ioat->desc_lock);
		return;
	}

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	hw->next = 0;
	list_add_tail(&desc->node, &ioat->used_desc);
	dump_desc_dbg(ioat, desc);

	ioat_set_chainaddr(ioat, desc->txd.phys);
	ioat_start(chan);
	spin_unlock_bh(&ioat->desc_lock);
}

/*
 * Perform an I/OAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void __devinit ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}

/**
 * ioat_dma_self_test - Perform an I/OAT transaction to verify the HW works.
 * @device: device to be tested
 */
int __devinit ioat_dma_self_test(struct ioatdma_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_device *dma = &device->common;
	struct device *dev = &device->pdev->dev;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	unsigned long flags;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE |
		DMA_PREP_INTERRUPT;
	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
						   IOAT_TEST_SIZE, flags);
	if (!tx) {
		dev_err(dev, "Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL)
					!= DMA_SUCCESS) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(dev, "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), "
		 "msix-single-vector, msi, intx");

/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 */
static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
	struct ioat_chan_common *chan;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	int i, j, msixcnt;
	int err = -EINVAL;
	u8 intrctrl = 0;

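	/* interrupt setup falls back in order:
	 * msix -> msix-single-vector -> msi -> intx */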
	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
		goto msix_single_vector;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = device->common.chancnt;
	for (i = 0; i < msixcnt; i++)
		device->msix_entries[i].entry = i;

	err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
	if (err < 0)
		goto msi;
	if (err > 0)
		goto msix_single_vector;

	for (i = 0; i < msixcnt; i++) {
		msix = &device->msix_entries[i];
		chan = ioat_chan_by_index(device, i);
		err = devm_request_irq(dev, msix->vector,
				       ioat_dma_do_interrupt_msix, 0,
				       "ioat-msix", chan);
		if (err) {
			for (j = 0; j < i; j++) {
				msix = &device->msix_entries[j];
				chan = ioat_chan_by_index(device, j);
				devm_free_irq(dev, msix->vector, chan);
			}
			goto msix_single_vector;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	goto done;

msix_single_vector:
	msix = &device->msix_entries[0];
	msix->entry = 0;
	err = pci_enable_msix(pdev, device->msix_entries, 1);
	if (err)
		goto msi;

	err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0,
			       "ioat-msix", device);
	if (err) {
		pci_disable_msix(pdev);
		goto msi;
	}
	goto done;

msi:
	err = pci_enable_msi(pdev);
	if (err)
		goto intx;

	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
			       "ioat-msi", device);
	if (err) {
		pci_disable_msi(pdev);
		goto intx;
	}
	goto done;

intx:
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
			       IRQF_SHARED, "ioat-intx", device);
	if (err)
		goto err_no_irq;

done:
	if (device->intr_quirk)
		device->intr_quirk(device);
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
	dev_err(dev, "no usable interrupts\n");
	return err;
}

static void ioat_disable_interrupts(struct ioatdma_device *device)
{
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
}

int __devinit ioat_probe(struct ioatdma_device *device)
{
	int err = -ENODEV;
	struct dma_device *dma = &device->common;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);

	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	device->enumerate_channels(device);

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->dev = &pdev->dev;

	if (!dma->chancnt) {
		dev_err(dev, "channel enumeration error\n");
		goto err_setup_interrupts;
	}

	err = ioat_dma_setup_interrupts(device);
	if (err)
		goto err_setup_interrupts;

	err = device->self_test(device);
	if (err)
		goto err_self_test;

	return 0;

err_self_test:
	ioat_disable_interrupts(device);
err_setup_interrupts:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	return err;
}

int __devinit ioat_register(struct ioatdma_device *device)
{
	int err = dma_async_device_register(&device->common);

	if (err) {
		ioat_disable_interrupts(device);
		pci_pool_destroy(device->completion_pool);
		pci_pool_destroy(device->dma_pool);
	}

	return err;
}

/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
static void ioat1_intr_quirk(struct ioatdma_device *device)
{
	struct pci_dev *pdev = device->pdev;
	u32 dmactrl;

	pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
	if (pdev->msi_enabled)
		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
	else
		dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
	pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
}

static ssize_t ring_size_show(struct dma_chan *c, char *page)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);

	return sprintf(page, "%d\n", ioat->desccount);
}
static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);

static ssize_t ring_active_show(struct dma_chan *c, char *page)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);

	return sprintf(page, "%d\n", ioat->active);
}
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);

static ssize_t cap_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;

	return sprintf(page, "copy%s%s%s%s%s%s\n",
		       dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
		       dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
		       dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
		       dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
		       dma_has_cap(DMA_MEMSET, dma->cap_mask) ? " fill" : "",
		       dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
}
struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);

static ssize_t version_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;
	struct ioatdma_device *device = to_ioatdma_device(dma);

	return sprintf(page, "%d.%d\n",
		       device->version >> 4, device->version & 0xf);
}
struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);

static struct attribute *ioat1_attrs[] = {
	&ring_size_attr.attr,
	&ring_active_attr.attr,
	&ioat_cap_attr.attr,
	&ioat_version_attr.attr,
	NULL,
};

static ssize_t
ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct ioat_sysfs_entry *entry;
	struct ioat_chan_common *chan;

	entry = container_of(attr, struct ioat_sysfs_entry, attr);
	chan = container_of(kobj, struct ioat_chan_common, kobj);

	if (!entry->show)
		return -EIO;
	return entry->show(&chan->common, page);
}

const struct sysfs_ops ioat_sysfs_ops = {
	.show = ioat_attr_show,
};

static struct kobj_type ioat1_ktype = {
	.sysfs_ops = &ioat_sysfs_ops,
	.default_attrs = ioat1_attrs,
};

void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioat_chan_common *chan = to_chan_common(c);
		struct kobject *parent = &c->dev->device.kobj;
		int err;

		err = kobject_init_and_add(&chan->kobj, type, parent, "quickdata");
		if (err) {
			dev_warn(to_dev(chan),
				 "sysfs init error (%d), continuing...\n", err);
			kobject_put(&chan->kobj);
			set_bit(IOAT_KOBJ_INIT_FAIL, &chan->state);
		}
	}
}

void ioat_kobject_del(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioat_chan_common *chan = to_chan_common(c);

		if (!test_bit(IOAT_KOBJ_INIT_FAIL, &chan->state)) {
			kobject_del(&chan->kobj);
			kobject_put(&chan->kobj);
		}
	}
}

int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	int err;

	device->intr_quirk = ioat1_intr_quirk;
	device->enumerate_channels = ioat1_enumerate_channels;
	device->self_test = ioat_dma_self_test;
	device->timer_fn = ioat1_timer_event;
	device->cleanup_fn = ioat1_cleanup_event;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
	dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
	dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
	dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
	dma->device_is_tx_complete = ioat_is_dma_complete;

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(4096);
	err = ioat_register(device);
	if (err)
		return err;
	ioat_kobject_add(device, &ioat1_ktype);

	if (dca)
		device->dca = ioat_dca_init(pdev, device->reg_base);

	return err;
}

void __devexit ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;

	ioat_disable_interrupts(device);

	ioat_kobject_del(device);

	dma_async_device_unregister(dma);

	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);

	INIT_LIST_HEAD(&dma->channels);
}