/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine (versions >= 2), which
 * does asynchronous data movement and checksumming operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "dma_v2.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

int ioat_ring_alloc_order = 8;
module_param(ioat_ring_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_alloc_order,
		 "ioat2+: allocate 2^n descriptors per channel"
		 " (default: 8 max: 16)");
static int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
module_param(ioat_ring_max_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_max_alloc_order,
		 "ioat2+: upper limit for ring size (default: 16)");

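/*
 * Illustrative note: the ring size is always a power of two, so with the
 * default order of 8 a channel ring holds 1 << 8 == 256 descriptors and
 * index arithmetic can wrap with a mask instead of a modulo, e.g. (sketch
 * of the idiom, not a quote of the helper):
 *
 *	entry = ring[idx & ((1 << alloc_order) - 1)];
 *
 * The driver's actual accessor for this is ioat2_get_ring_ent(), defined
 * in dma_v2.h.
 */
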
void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;

	ioat->dmacount += ioat2_ring_pending(ioat);
	ioat->issued = ioat->head;
	writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
	dev_dbg(to_dev(chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
}

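/*
 * Illustrative note (helper semantics live in dma_v2.h, so this is a
 * sketch of the bookkeeping, modulo the ring size): ioat->head counts
 * descriptors produced, ioat->issued marks how far the hardware has been
 * told about, and ioat->tail trails behind as completions are reaped:
 *
 *	pending = head - issued;	// produced but not yet issued
 *	active  = head - tail;		// issued but not yet reaped
 *
 * __ioat2_issue_pending() folds the pending count into dmacount and
 * publishes it through the DMACOUNT register so the engine fetches the
 * newly produced descriptors.
 */
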
void ioat2_issue_pending(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	if (ioat2_ring_pending(ioat)) {
		spin_lock_bh(&ioat->prep_lock);
		__ioat2_issue_pending(ioat);
		spin_unlock_bh(&ioat->prep_lock);
	}
}

/**
 * ioat2_update_pending - log pending descriptors
 * @ioat: ioat2+ channel
 *
 * Check if the number of unsubmitted descriptors has exceeded the
 * watermark. Called with prep_lock held
 */
static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
{
	if (ioat2_ring_pending(ioat) > ioat_pending_level)
		__ioat2_issue_pending(ioat);
}

static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;

	if (ioat2_ring_space(ioat) < 1) {
		dev_err(to_dev(&ioat->base),
			"Unable to start null desc - ring full\n");
		return;
	}

	dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);
	desc = ioat2_get_ring_ent(ioat, ioat->head);

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	ioat2_set_chainaddr(ioat, desc->txd.phys);
	dump_desc_dbg(ioat, desc);
	wmb();
	ioat->head += 1;
	__ioat2_issue_pending(ioat);
}

static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
	spin_lock_bh(&ioat->prep_lock);
	__ioat2_start_null_desc(ioat);
	spin_unlock_bh(&ioat->prep_lock);
}

static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct dma_async_tx_descriptor *tx;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	u16 active;
	int idx = ioat->tail, i;

	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);

	active = ioat2_ring_active(ioat);
	for (i = 0; i < active && !seen_current; i++) {
		smp_read_barrier_depends();
		prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
		desc = ioat2_get_ring_ent(ioat, idx + i);
		tx = &desc->txd;
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			dma_descriptor_unmap(tx);
			dma_cookie_complete(tx);
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys == phys_complete)
			seen_current = true;
	}
	smp_mb(); /* finish all descriptor reads before incrementing tail */
	ioat->tail = idx + i;
	BUG_ON(active && !seen_current); /* no active descs have written a completion? */

	chan->last_completion = phys_complete;
	if (active - i == 0) {
		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
			__func__);
		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
}

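/*
 * Illustrative note on the cleanup walk above: phys_complete is the bus
 * address of the last descriptor the hardware finished, so the loop reaps
 * from tail forward until it sees that address. For example (values are
 * hypothetical), with tail == 5 and completed entries whose txd.phys are
 * P5, P6 and P7, a phys_complete of P7 advances tail from 5 to 8 and runs
 * the callback of every entry that carried a cookie.
 */
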
/**
 * ioat2_cleanup - clean finished descriptors (advance tail pointer)
 * @ioat: ioat channel to be cleaned up
 */
static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;

	spin_lock_bh(&chan->cleanup_lock);
	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);
	spin_unlock_bh(&chan->cleanup_lock);
}

void ioat2_cleanup_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;

	ioat2_cleanup(ioat);
	if (!test_bit(IOAT_RUN, &chan->state))
		return;
	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

void __ioat2_restart_chan(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;

	/* set the tail to be re-issued */
	ioat->issued = ioat->tail;
	ioat->dmacount = 0;
	set_bit(IOAT_COMPLETION_PENDING, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	dev_dbg(to_dev(chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);

	if (ioat2_ring_pending(ioat)) {
		struct ioat_ring_ent *desc;

		desc = ioat2_get_ring_ent(ioat, ioat->tail);
		ioat2_set_chainaddr(ioat, desc->txd.phys);
		__ioat2_issue_pending(ioat);
	} else
		__ioat2_start_null_desc(ioat);
}

int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;
	u32 status;

	status = ioat_chansts(chan);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(chan);
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		if (tmo && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		status = ioat_chansts(chan);
		cpu_relax();
	}

	return err;
}

int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;

	ioat_reset(chan);
	while (ioat_reset_pending(chan)) {
		if (end && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		cpu_relax();
	}

	return err;
}

static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;

	ioat2_quiesce(chan, 0);
	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	__ioat2_restart_chan(ioat);
}

static void check_active(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;

	if (ioat2_ring_active(ioat)) {
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		return;
	}

	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	else if (ioat->alloc_order > ioat_get_alloc_order()) {
		/* if the ring is idle, empty, and oversized try to step
		 * down the size
		 */
		reshape_ring(ioat, ioat->alloc_order - 1);

		/* keep shrinking until we get back to our minimum
		 * default size
		 */
		if (ioat->alloc_order > ioat_get_alloc_order())
			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
}

void ioat2_timer_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;
	u64 status;

	status = ioat_chansts(chan);

	/* when halted due to errors check for channel
	 * programming errors before advancing the completion state
	 */
	if (is_ioat_halted(status)) {
		u32 chanerr;

		chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
			__func__, chanerr);
		if (test_bit(IOAT_RUN, &chan->state))
			BUG_ON(is_ioat_bug(chanerr));
		else /* we never got off the ground */
			return;
	}

	/* if we haven't made progress and we have already
	 * acknowledged a pending completion once, then be more
	 * forceful with a restart
	 */
	spin_lock_bh(&chan->cleanup_lock);
	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);
	else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
		spin_lock_bh(&ioat->prep_lock);
		ioat2_restart_channel(ioat);
		spin_unlock_bh(&ioat->prep_lock);
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	} else {
		set_bit(IOAT_COMPLETION_ACK, &chan->state);
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
	}

	if (ioat2_ring_active(ioat))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
	else {
		spin_lock_bh(&ioat->prep_lock);
		check_active(ioat);
		spin_unlock_bh(&ioat->prep_lock);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}

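/*
 * Illustrative note: the timer handler above is effectively a two-strike
 * watchdog. Strike one (no cleanup progress found) sets
 * IOAT_COMPLETION_ACK and re-arms the timer; strike two (still no
 * progress with the ack bit already set) quiesces and restarts the
 * channel via ioat2_restart_channel().
 */
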
static int ioat2_reset_hw(struct ioat_chan_common *chan)
{
	/* throw away whatever the channel was doing and get it initialized */
	u32 chanerr;

	ioat2_quiesce(chan, msecs_to_jiffies(100));

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);

	return ioat2_reset_sync(chan, msecs_to_jiffies(200));
}

/**
 * ioat2_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
int ioat2_enumerate_channels(struct ioatdma_device *device)
{
	struct ioat2_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;
	u8 xfercap_log;
	int i;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(device->idx));
		dma->chancnt = ARRAY_SIZE(device->idx);
	}
	xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_log &= 0x1f; /* bits [4:0] valid */
	if (xfercap_log == 0)
		return 0;
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);

	/* FIXME which i/oat version is i7300? */
#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i);
		ioat->xfercap_log = xfercap_log;
		spin_lock_init(&ioat->prep_lock);
		if (device->reset_hw(&ioat->base)) {
			i = 0;
			break;
		}
	}
	dma->chancnt = i;
	return i;
}

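/*
 * Illustrative note: XFERCAP is reported as a log2, so a xfercap_log of,
 * say, 20 means each hardware descriptor can move at most 1 << 20 bytes
 * (1 MB); ioat2_xferlen_to_descs() uses this to size multi-descriptor
 * operations. The value 20 here is only an example.
 */
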
static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	dma_cookie_t cookie;

	cookie = dma_cookie_assign(tx);
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &chan->state))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	/* make descriptor updates visible before advancing ioat->head,
	 * this is purposefully not smp_wmb() since we are also
	 * publishing the descriptor updates to a dma device
	 */
	wmb();

	ioat->head += ioat->produce;

	ioat2_update_pending(ioat);
	spin_unlock_bh(&ioat->prep_lock);

	return cookie;
}

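/*
 * Illustrative note: prep_lock is deliberately asymmetric. It is taken in
 * ioat2_check_space_lock(), held across descriptor setup in the prep
 * routine, and only released here in tx_submit, which keeps cookie
 * assignment and ring publication in submission order (sketch):
 *
 *	ioat2_check_space_lock(ioat, n);	// returns 0 with lock held
 *	... program n descriptors ...
 *	tx->tx_submit(tx);			// ioat2_tx_submit_unlock()
 */
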
static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
{
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	struct ioatdma_device *dma;
	dma_addr_t phys;

	dma = to_ioatdma_device(chan->device);
	hw = pci_pool_alloc(dma->dma_pool, flags, &phys);
	if (!hw)
		return NULL;
	memset(hw, 0, sizeof(*hw));

	desc = kmem_cache_zalloc(ioat2_cache, flags);
	if (!desc) {
		pci_pool_free(dma->dma_pool, hw, phys);
		return NULL;
	}

	dma_async_tx_descriptor_init(&desc->txd, chan);
	desc->txd.tx_submit = ioat2_tx_submit_unlock;
	desc->hw = hw;
	desc->txd.phys = phys;
	return desc;
}

static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
	struct ioatdma_device *dma;

	dma = to_ioatdma_device(chan->device);
	pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys);
	kmem_cache_free(ioat2_cache, desc);
}

static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
	struct ioat_ring_ent **ring;
	int descs = 1 << order;
	int i;

	if (order > ioat_get_max_alloc_order())
		return NULL;

	/* allocate the array to hold the software ring */
	ring = kcalloc(descs, sizeof(*ring), flags);
	if (!ring)
		return NULL;
	for (i = 0; i < descs; i++) {
		ring[i] = ioat2_alloc_ring_ent(c, flags);
		if (!ring[i]) {
			while (i--)
				ioat2_free_ring_ent(ring[i], c);
			kfree(ring);
			return NULL;
		}
		set_desc_id(ring[i], i);
	}

	/* link descs */
	for (i = 0; i < descs-1; i++) {
		struct ioat_ring_ent *next = ring[i+1];
		struct ioat_dma_descriptor *hw = ring[i]->hw;

		hw->next = next->txd.phys;
	}
	ring[i]->hw->next = ring[0]->txd.phys;

	return ring;
}

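/*
 * Illustrative note: after the linking loop above every descriptor's
 * hw->next holds the bus address of its successor and the final entry
 * points back at ring[0], so the hardware sees an endless circular chain
 * of 1 << order descriptors:
 *
 *	ring[0] -> ring[1] -> ... -> ring[descs-1] -> ring[0]
 */
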
void ioat2_free_chan_resources(struct dma_chan *c);

/* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring
 * @c: channel to be initialized
 */
int ioat2_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_ring_ent **ring;
	u64 status;
	int order;
	int i = 0;

	/* have we already been set up? */
	if (ioat->ring)
		return 1 << ioat->alloc_order;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion = pci_pool_alloc(chan->device->completion_pool,
					  GFP_KERNEL, &chan->completion_dma);
	if (!chan->completion)
		return -ENOMEM;

	memset(chan->completion, 0, sizeof(*chan->completion));
	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_dma) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	order = ioat_get_alloc_order();
	ring = ioat2_alloc_ring(c, order, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	spin_lock_bh(&chan->cleanup_lock);
	spin_lock_bh(&ioat->prep_lock);
	ioat->ring = ring;
	ioat->head = 0;
	ioat->issued = 0;
	ioat->tail = 0;
	ioat->alloc_order = order;
	set_bit(IOAT_RUN, &chan->state);
	spin_unlock_bh(&ioat->prep_lock);
	spin_unlock_bh(&chan->cleanup_lock);

	ioat2_start_null_desc(ioat);

	/* check that we got off the ground */
	do {
		udelay(1);
		status = ioat_chansts(chan);
	} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));

	if (is_ioat_active(status) || is_ioat_idle(status)) {
		return 1 << ioat->alloc_order;
	} else {
		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);

		dev_WARN(to_dev(chan),
			 "failed to start channel chanerr: %#x\n", chanerr);
		ioat2_free_chan_resources(c);
		return -EFAULT;
	}
}

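/*
 * Illustrative note: the completion writeback area programmed above (via
 * the CHANCMP registers) is where the device writes its status, including
 * the address of the last completed descriptor; ioat_cleanup_preamble()
 * reads it back so __cleanup() can learn how far to advance the tail
 * without touching hardware registers on every poll.
 */
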
bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
{
	/* reshape differs from normal ring allocation in that we want
	 * to allocate a new software ring while only
	 * extending/truncating the hardware ring
	 */
	struct ioat_chan_common *chan = &ioat->base;
	struct dma_chan *c = &chan->common;
	const u32 curr_size = ioat2_ring_size(ioat);
	const u16 active = ioat2_ring_active(ioat);
	const u32 new_size = 1 << order;
	struct ioat_ring_ent **ring;
	u16 i;

	if (order > ioat_get_max_alloc_order())
		return false;

	/* double check that we have at least 1 free descriptor */
	if (active == curr_size)
		return false;

	/* when shrinking, verify that we can hold the current active
	 * set in the new ring
	 */
	if (active >= new_size)
		return false;

	/* allocate the array to hold the software ring */
	ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
	if (!ring)
		return false;

	/* allocate/trim descriptors as needed */
	if (new_size > curr_size) {
		/* copy current descriptors to the new ring */
		for (i = 0; i < curr_size; i++) {
			u16 curr_idx = (ioat->tail+i) & (curr_size-1);
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat->ring[curr_idx];
			set_desc_id(ring[new_idx], new_idx);
		}

		/* add new descriptors to the ring */
		for (i = curr_size; i < new_size; i++) {
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat2_alloc_ring_ent(c, GFP_NOWAIT);
			if (!ring[new_idx]) {
				while (i--) {
					u16 new_idx = (ioat->tail+i) & (new_size-1);

					ioat2_free_ring_ent(ring[new_idx], c);
				}
				kfree(ring);
				return false;
			}
			set_desc_id(ring[new_idx], new_idx);
		}

		/* hw link new descriptors */
		for (i = curr_size-1; i < new_size; i++) {
			u16 new_idx = (ioat->tail+i) & (new_size-1);
			struct ioat_ring_ent *next = ring[(new_idx+1) & (new_size-1)];
			struct ioat_dma_descriptor *hw = ring[new_idx]->hw;

			hw->next = next->txd.phys;
		}
	} else {
		struct ioat_dma_descriptor *hw;
		struct ioat_ring_ent *next;

		/* copy current descriptors to the new ring, dropping the
		 * removed descriptors
		 */
		for (i = 0; i < new_size; i++) {
			u16 curr_idx = (ioat->tail+i) & (curr_size-1);
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat->ring[curr_idx];
			set_desc_id(ring[new_idx], new_idx);
		}

		/* free deleted descriptors */
		for (i = new_size; i < curr_size; i++) {
			struct ioat_ring_ent *ent;

			ent = ioat2_get_ring_ent(ioat, ioat->tail+i);
			ioat2_free_ring_ent(ent, c);
		}

		/* fix up hardware ring */
		hw = ring[(ioat->tail+new_size-1) & (new_size-1)]->hw;
		next = ring[(ioat->tail+new_size) & (new_size-1)];
		hw->next = next->txd.phys;
	}

	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
		__func__, new_size);

	kfree(ioat->ring);
	ioat->ring = ring;
	ioat->alloc_order = order;

	return true;
}

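/*
 * Worked example for reshape_ring() (values are illustrative): growing
 * from order 8 (256 entries) to order 9 (512) with tail == 250 copies the
 * 256 existing slots to their new positions ((250+i) & 511), allocates
 * 256 fresh descriptors for the remaining slots, and re-links hw->next
 * across the seam. Only the software array is reallocated; the hardware
 * simply follows the updated descriptor chain.
 */
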
/**
 * ioat2_check_space_lock - verify space and grab ring producer lock
 * @ioat: ioat2,3 channel (ring) to operate on
 * @num_descs: allocation length
 */
int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs)
{
	struct ioat_chan_common *chan = &ioat->base;
	bool retry;

 retry:
	spin_lock_bh(&ioat->prep_lock);
	/* never allow the last descriptor to be consumed, we need at
	 * least one free at all times to allow for on-the-fly ring
	 * resizing.
	 */
	if (likely(ioat2_ring_space(ioat) > num_descs)) {
		dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
			__func__, num_descs, ioat->head, ioat->tail, ioat->issued);
		ioat->produce = num_descs;
		return 0;  /* with ioat->prep_lock held */
	}
	retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &chan->state);
	spin_unlock_bh(&ioat->prep_lock);

	/* is another cpu already trying to expand the ring? */
	if (retry)
		goto retry;

	spin_lock_bh(&chan->cleanup_lock);
	spin_lock_bh(&ioat->prep_lock);
	retry = reshape_ring(ioat, ioat->alloc_order + 1);
	clear_bit(IOAT_RESHAPE_PENDING, &chan->state);
	spin_unlock_bh(&ioat->prep_lock);
	spin_unlock_bh(&chan->cleanup_lock);

	/* if we were able to expand the ring retry the allocation */
	if (retry)
		goto retry;

	if (printk_ratelimit())
		dev_dbg(to_dev(chan), "%s: ring full! num_descs: %d (%x:%x:%x)\n",
			__func__, num_descs, ioat->head, ioat->tail, ioat->issued);

	/* progress reclaim in the allocation failure case we may be
	 * called under bh_disabled so we need to trigger the timer
	 * event directly
	 */
	if (jiffies > chan->timer.expires && timer_pending(&chan->timer)) {
		struct ioatdma_device *device = chan->device;

		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		device->timer_fn((unsigned long) &chan->common);
	}

	return -ENOMEM;
}

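/*
 * Illustrative usage of the producer path (a sketch mirroring
 * ioat2_dma_prep_memcpy_lock() below, not additional driver code):
 *
 *	if (ioat2_check_space_lock(ioat, num_descs) == 0) {
 *		// prep_lock is held, ioat->produce == num_descs
 *		idx = ioat->head;
 *		... program descriptors idx .. idx + num_descs - 1 ...
 *	} else
 *		return NULL;	// ring full even after attempted reshape
 */
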
struct dma_async_tx_descriptor *
ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			   dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	dma_addr_t dst = dma_dest;
	dma_addr_t src = dma_src;
	size_t total_len = len;
	int num_descs, idx, i;

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
		idx = ioat->head;
	else
		return NULL;
	i = 0;
	do {
		size_t copy = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		hw = desc->hw;

		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dst;

		len -= copy;
		dst += copy;
		src += copy;
		dump_desc_dbg(ioat, desc);
	} while (++i < num_descs);

	desc->txd.flags = flags;
	desc->len = total_len;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	hw->ctl_f.compl_write = 1;
	dump_desc_dbg(ioat, desc);
	/* we leave the channel locked to ensure in order submission */

	return &desc->txd;
}

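/*
 * Worked example (illustrative numbers): with xfercap_log == 20 (1 MB per
 * descriptor), a 2.5 MB memcpy becomes three descriptors of 1 MB, 1 MB
 * and 0.5 MB. Only the last descriptor gets int_en/compl_write set, so a
 * single completion write and optional interrupt cover the whole
 * transaction.
 */
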
/**
 * ioat2_free_chan_resources - release all the descriptors
 * @c: the channel to be cleaned
 */
void ioat2_free_chan_resources(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *device = chan->device;
	struct ioat_ring_ent *desc;
	const u16 total_descs = 1 << ioat->alloc_order;
	int descs;
	int i;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (!ioat->ring)
		return;

	ioat_stop(chan);
	device->reset_hw(chan);

	spin_lock_bh(&chan->cleanup_lock);
	spin_lock_bh(&ioat->prep_lock);
	descs = ioat2_ring_space(ioat);
	dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs);
	for (i = 0; i < descs; i++) {
		desc = ioat2_get_ring_ent(ioat, ioat->head + i);
		ioat2_free_ring_ent(desc, c);
	}

	if (descs < total_descs)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			total_descs - descs);

	for (i = 0; i < total_descs - descs; i++) {
		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
		dump_desc_dbg(ioat, desc);
		ioat2_free_ring_ent(desc, c);
	}

	kfree(ioat->ring);
	ioat->ring = NULL;
	ioat->alloc_order = 0;
	pci_pool_free(device->completion_pool, chan->completion,
		      chan->completion_dma);
	spin_unlock_bh(&ioat->prep_lock);
	spin_unlock_bh(&chan->cleanup_lock);

	chan->last_completion = 0;
	chan->completion_dma = 0;
	ioat->dmacount = 0;
}

static ssize_t ring_size_show(struct dma_chan *c, char *page)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	return sprintf(page, "%d\n", (1 << ioat->alloc_order) & ~1);
}
static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);

static ssize_t ring_active_show(struct dma_chan *c, char *page)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	/* ...taken outside the lock, no need to be precise */
	return sprintf(page, "%d\n", ioat2_ring_active(ioat));
}
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);

static struct attribute *ioat2_attrs[] = {
	&ring_size_attr.attr,
	&ring_active_attr.attr,
	&ioat_cap_attr.attr,
	&ioat_version_attr.attr,
	NULL,
};

struct kobj_type ioat2_ktype = {
	.sysfs_ops = &ioat_sysfs_ops,
	.default_attrs = ioat2_attrs,
};

int ioat2_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	int err;

	device->enumerate_channels = ioat2_enumerate_channels;
	device->reset_hw = ioat2_reset_hw;
	device->cleanup_fn = ioat2_cleanup_event;
	device->timer_fn = ioat2_timer_event;
	device->self_test = ioat_dma_self_test;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat2_issue_pending;
	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
	dma->device_free_chan_resources = ioat2_free_chan_resources;
	dma->device_tx_status = ioat_dma_tx_status;

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(2048);

	list_for_each_entry(c, &dma->channels, device_node) {
		chan = to_chan_common(c);
		writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU,
		       chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(device);
	if (err)
		return err;

	ioat_kobject_add(device, &ioat2_ktype);

	if (dca)
		device->dca = ioat2_dca_init(pdev, device->reg_base);

	return err;
}
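
/*
 * Illustrative client-side sketch (not part of this file): a dmaengine
 * consumer would reach the callbacks wired up above roughly as follows,
 * using the generic dmaengine API (error handling elided, dst/src/len
 * are hypothetical DMA-mapped buffers):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						  DMA_PREP_INTERRUPT);
 *	tx->tx_submit(tx);		// ioat2_tx_submit_unlock()
 *	dma_async_issue_pending(chan);	// ioat2_issue_pending()
 */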