/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine (versions >= 2), which
 * does asynchronous data movement and checksumming operations.
 */

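/*
 * Descriptor-ring bookkeeping used throughout this file (summarized from
 * the code below): each channel owns a power-of-2 ring of descriptors.
 * 'head' is the next slot to be allocated, 'issued' marks how far the
 * hardware has been notified via the DMACOUNT register, and 'tail' is the
 * next slot to be reaped by cleanup.  The ring starts at
 * 2^ioat_ring_alloc_order entries and may be grown or shrunk on demand,
 * up to 2^ioat_ring_max_alloc_order (see reshape_ring()).
 */
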
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "dma_v2.h"
#include "registers.h"
#include "hw.h"

static int ioat_ring_alloc_order = 8;
module_param(ioat_ring_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_alloc_order,
		 "ioat2+: allocate 2^n descriptors per channel (default: n=8)");
static int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
module_param(ioat_ring_max_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_max_alloc_order,
		 "ioat2+: upper limit for dynamic ring resizing (default: n=16)");

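/*
 * __ioat2_issue_pending - write the accumulated dmacount to hardware,
 * notifying it of newly appended descriptors.  Caller must hold
 * ring_lock.
 */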
static void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
{
	void __iomem *reg_base = ioat->base.reg_base;

	ioat->pending = 0;
	ioat->dmacount += ioat2_ring_pending(ioat);
	ioat->issued = ioat->head;
	/* make descriptor updates globally visible before notifying channel */
	wmb();
	writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
	dev_dbg(to_dev(&ioat->base),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
}

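/*
 * ioat2_issue_pending - dmaengine ->device_issue_pending hook; flushes
 * any descriptors batched since the last doorbell write.
 */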
static void ioat2_issue_pending(struct dma_chan *chan)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(chan);

	spin_lock_bh(&ioat->ring_lock);
	if (ioat->pending == 1)
		__ioat2_issue_pending(ioat);
	spin_unlock_bh(&ioat->ring_lock);
}

/**
 * ioat2_update_pending - log pending descriptors
 * @ioat: ioat2+ channel
 *
 * Set pending to '1' unless pending is already set to '2'; pending == 2
 * indicates that submission is temporarily blocked due to an in-flight
 * reset.  If we are already above the ioat_pending_level threshold then
 * just issue pending.
 *
 * Called with ring_lock held.
 */
static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
{
	if (unlikely(ioat->pending == 2))
		return;
	else if (ioat2_ring_pending(ioat) > ioat_pending_level)
		__ioat2_issue_pending(ioat);
	else
		ioat->pending = 1;
}

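/*
 * __ioat2_start_null_desc - kick the channel with a no-op descriptor;
 * used to (re)start the hardware on an otherwise empty chain.  Caller
 * must hold ring_lock.
 */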
static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;
	int idx;

	if (ioat2_ring_space(ioat) < 1) {
		dev_err(to_dev(&ioat->base),
			"Unable to start null desc - ring full\n");
		return;
	}

	dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);
	idx = ioat2_desc_alloc(ioat, 1);
	desc = ioat2_get_ring_ent(ioat, idx);

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	ioat2_set_chainaddr(ioat, desc->txd.phys);
	dump_desc_dbg(ioat, desc);
	__ioat2_issue_pending(ioat);
}

static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
	spin_lock_bh(&ioat->ring_lock);
	__ioat2_start_null_desc(ioat);
	spin_unlock_bh(&ioat->ring_lock);
}

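/*
 * __cleanup - reap completed descriptors up to (and including) the one
 * whose physical address the hardware reported in the completion
 * writeback area.  Caller must hold ring_lock and cleanup_lock.
 */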
static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct dma_async_tx_descriptor *tx;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	u16 active;
	int i;

	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);

	active = ioat2_ring_active(ioat);
	for (i = 0; i < active && !seen_current; i++) {
		prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1));
		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
		tx = &desc->txd;
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
			chan->completed_cookie = tx->cookie;
			tx->cookie = 0;
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys == phys_complete)
			seen_current = true;
	}
	ioat->tail += i;
	BUG_ON(!seen_current); /* no active descs have written a completion? */

	chan->last_completion = phys_complete;
	if (ioat->head == ioat->tail) {
		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
			__func__);
		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
}

/**
 * ioat2_cleanup - clean finished descriptors (advance tail pointer)
 * @ioat: ioat channel to be cleaned up
 */
static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;

	prefetch(chan->completion);

	if (!spin_trylock_bh(&chan->cleanup_lock))
		return;

	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	if (!spin_trylock_bh(&ioat->ring_lock)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	__cleanup(ioat, phys_complete);

	spin_unlock_bh(&ioat->ring_lock);
	spin_unlock_bh(&chan->cleanup_lock);
}

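/*
 * Tasklet run for the channel's interrupt: reap completions, then
 * rewrite CHANCTRL (IOAT_CHANCTRL_RUN, per its definition in dma.h,
 * includes the interrupt re-arm bit).
 */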
static void ioat2_cleanup_tasklet(unsigned long data)
{
	struct ioat2_dma_chan *ioat = (void *) data;

	ioat2_cleanup(ioat);
	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

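/*
 * __restart_chan - resume a quiesced channel from the current tail;
 * re-issues anything still pending, otherwise kicks the channel with a
 * null descriptor.  Caller must hold ring_lock.
 */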
static void __restart_chan(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;

	/* set the tail to be re-issued */
	ioat->issued = ioat->tail;
	ioat->dmacount = 0;
	set_bit(IOAT_COMPLETION_PENDING, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	dev_dbg(to_dev(chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);

	if (ioat2_ring_pending(ioat)) {
		struct ioat_ring_ent *desc;

		desc = ioat2_get_ring_ent(ioat, ioat->tail);
		ioat2_set_chainaddr(ioat, desc->txd.phys);
		__ioat2_issue_pending(ioat);
	} else
		__ioat2_start_null_desc(ioat);
}

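/*
 * ioat2_restart_channel - suspend the channel, spin until it quiesces,
 * reap whatever completed, then restart from the tail.
 */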
static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;
	u32 status;

	status = ioat_chansts(chan);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(chan);
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		status = ioat_chansts(chan);
		cpu_relax();
	}

	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	__restart_chan(ioat);
}

static bool reshape_ring(struct ioat2_dma_chan *ioat, int order);

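/*
 * ioat2_timer_event - per-channel watchdog.  With a completion pending
 * it checks for progress (restarting the channel if stalled); when idle
 * it opportunistically shrinks an oversized ring back toward the
 * default allocation order.
 */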
static void ioat2_timer_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = (void *) data;
	struct ioat_chan_common *chan = &ioat->base;

	spin_lock_bh(&chan->cleanup_lock);
	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
		unsigned long phys_complete;
		u64 status;

		spin_lock_bh(&ioat->ring_lock);
		status = ioat_chansts(chan);

		/* when halted due to errors, check for channel
		 * programming errors before advancing the completion state
		 */
		if (is_ioat_halted(status)) {
			u32 chanerr;

			chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
			BUG_ON(is_ioat_bug(chanerr));
		}

		/* if we haven't made progress and we have already
		 * acknowledged a pending completion once, then be more
		 * forceful with a restart
		 */
		if (ioat_cleanup_preamble(chan, &phys_complete))
			__cleanup(ioat, phys_complete);
		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
			ioat2_restart_channel(ioat);
		else {
			set_bit(IOAT_COMPLETION_ACK, &chan->state);
			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		}
		spin_unlock_bh(&ioat->ring_lock);
	} else {
		u16 active;

		/* if the ring is idle, empty, and oversized try to step
		 * down the size
		 */
		spin_lock_bh(&ioat->ring_lock);
		active = ioat2_ring_active(ioat);
		if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
			reshape_ring(ioat, ioat->alloc_order - 1);
		spin_unlock_bh(&ioat->ring_lock);

		/* keep shrinking until we get back to our minimum
		 * default size
		 */
		if (ioat->alloc_order > ioat_get_alloc_order())
			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}

/**
 * ioat2_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat2_enumerate_channels(struct ioatdma_device *device)
{
	struct ioat2_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;
	u8 xfercap_log;
	int i;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(device->idx));
		dma->chancnt = ARRAY_SIZE(device->idx);
	}
	xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_log &= 0x1f; /* bits [4:0] valid */
	if (xfercap_log == 0)
		return 0;
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);

	/* FIXME which i/oat version is i7300? */
#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i,
				  ioat2_timer_event,
				  ioat2_cleanup_tasklet,
				  (unsigned long) ioat);
		ioat->xfercap_log = xfercap_log;
		spin_lock_init(&ioat->ring_lock);
	}
	dma->chancnt = i;
	return i;
}

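/*
 * ioat2_tx_submit_unlock - dmaengine ->tx_submit hook.  Note the
 * asymmetric locking: ring_lock is taken by the prep routine (see
 * ioat2_dma_prep_memcpy_lock() below) and released here, which
 * guarantees descriptors reach the ring in submission order.
 */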
static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	dma_cookie_t cookie = c->cookie;

	cookie++;
	if (cookie < 0)
		cookie = 1;
	tx->cookie = cookie;
	c->cookie = cookie;
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
	ioat2_update_pending(ioat);
	spin_unlock_bh(&ioat->ring_lock);

	return cookie;
}

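/*
 * ioat2_alloc_ring_ent - allocate one hardware descriptor from the
 * DMA-coherent pool along with its software ring-entry bookkeeping.
 */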
static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
{
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	struct ioatdma_device *dma;
	dma_addr_t phys;

	dma = to_ioatdma_device(chan->device);
	hw = pci_pool_alloc(dma->dma_pool, flags, &phys);
	if (!hw)
		return NULL;
	memset(hw, 0, sizeof(*hw));

	desc = kzalloc(sizeof(*desc), flags);
	if (!desc) {
		pci_pool_free(dma->dma_pool, hw, phys);
		return NULL;
	}

	dma_async_tx_descriptor_init(&desc->txd, chan);
	desc->txd.tx_submit = ioat2_tx_submit_unlock;
	desc->hw = hw;
	desc->txd.phys = phys;
	return desc;
}

static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
	struct ioatdma_device *dma;

	dma = to_ioatdma_device(chan->device);
	pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys);
	kfree(desc);
}

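/*
 * ioat2_alloc_ring - allocate a 2^order software ring and chain the
 * hardware descriptors into a circle via their 'next' pointers.
 */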
static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
	struct ioat_ring_ent **ring;
	int descs = 1 << order;
	int i;

	if (order > ioat_get_max_alloc_order())
		return NULL;

	/* allocate the array to hold the software ring */
	ring = kcalloc(descs, sizeof(*ring), flags);
	if (!ring)
		return NULL;
	for (i = 0; i < descs; i++) {
		ring[i] = ioat2_alloc_ring_ent(c, flags);
		if (!ring[i]) {
			while (i--)
				ioat2_free_ring_ent(ring[i], c);
			kfree(ring);
			return NULL;
		}
		set_desc_id(ring[i], i);
	}

	/* link descs */
	for (i = 0; i < descs-1; i++) {
		struct ioat_ring_ent *next = ring[i+1];
		struct ioat_dma_descriptor *hw = ring[i]->hw;

		hw->next = next->txd.phys;
	}
	ring[i]->hw->next = ring[0]->txd.phys;

	return ring;
}

/**
 * ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring
 * @c: channel to be initialized
 */
static int ioat2_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_ring_ent **ring;
	u32 chanerr;
	int order;

	/* have we already been set up? */
	if (ioat->ring)
		return 1 << ioat->alloc_order;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion = pci_pool_alloc(chan->device->completion_pool,
					  GFP_KERNEL, &chan->completion_dma);
	if (!chan->completion)
		return -ENOMEM;

	memset(chan->completion, 0, sizeof(*chan->completion));
	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_dma) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	order = ioat_get_alloc_order();
	ring = ioat2_alloc_ring(c, order, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	spin_lock_bh(&ioat->ring_lock);
	ioat->ring = ring;
	ioat->head = 0;
	ioat->issued = 0;
	ioat->tail = 0;
	ioat->pending = 0;
	ioat->alloc_order = order;
	spin_unlock_bh(&ioat->ring_lock);

	tasklet_enable(&chan->cleanup_task);
	ioat2_start_null_desc(ioat);

	return 1 << ioat->alloc_order;
}

static bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
{
	/* reshape differs from normal ring allocation in that we want
	 * to allocate a new software ring while only
	 * extending/truncating the hardware ring
	 */
	struct ioat_chan_common *chan = &ioat->base;
	struct dma_chan *c = &chan->common;
	const u16 curr_size = ioat2_ring_mask(ioat) + 1;
	const u16 active = ioat2_ring_active(ioat);
	const u16 new_size = 1 << order;
	struct ioat_ring_ent **ring;
	u16 i;

	if (order > ioat_get_max_alloc_order())
		return false;

	/* double check that we have at least 1 free descriptor */
	if (active == curr_size)
		return false;

	/* when shrinking, verify that we can hold the current active
	 * set in the new ring
	 */
	if (active >= new_size)
		return false;

	/* allocate the array to hold the software ring */
	ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
	if (!ring)
		return false;

	/* allocate/trim descriptors as needed */
	if (new_size > curr_size) {
		/* copy current descriptors to the new ring */
		for (i = 0; i < curr_size; i++) {
			u16 curr_idx = (ioat->tail+i) & (curr_size-1);
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat->ring[curr_idx];
			set_desc_id(ring[new_idx], new_idx);
		}

		/* add new descriptors to the ring */
		for (i = curr_size; i < new_size; i++) {
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat2_alloc_ring_ent(c, GFP_NOWAIT);
			if (!ring[new_idx]) {
				while (i--) {
					u16 new_idx = (ioat->tail+i) & (new_size-1);

					ioat2_free_ring_ent(ring[new_idx], c);
				}
				kfree(ring);
				return false;
			}
			set_desc_id(ring[new_idx], new_idx);
		}

		/* hw link new descriptors */
		for (i = curr_size-1; i < new_size; i++) {
			u16 new_idx = (ioat->tail+i) & (new_size-1);
			struct ioat_ring_ent *next = ring[(new_idx+1) & (new_size-1)];
			struct ioat_dma_descriptor *hw = ring[new_idx]->hw;

			hw->next = next->txd.phys;
		}
	} else {
		struct ioat_dma_descriptor *hw;
		struct ioat_ring_ent *next;

		/* copy current descriptors to the new ring, dropping the
		 * removed descriptors
		 */
		for (i = 0; i < new_size; i++) {
			u16 curr_idx = (ioat->tail+i) & (curr_size-1);
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat->ring[curr_idx];
			set_desc_id(ring[new_idx], new_idx);
		}

		/* free deleted descriptors */
		for (i = new_size; i < curr_size; i++) {
			struct ioat_ring_ent *ent;

			ent = ioat2_get_ring_ent(ioat, ioat->tail+i);
			ioat2_free_ring_ent(ent, c);
		}

		/* fix up hardware ring */
		hw = ring[(ioat->tail+new_size-1) & (new_size-1)]->hw;
		next = ring[(ioat->tail+new_size) & (new_size-1)];
		hw->next = next->txd.phys;
	}

	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
		__func__, new_size);

	kfree(ioat->ring);
	ioat->ring = ring;
	ioat->alloc_order = order;

	return true;
}

/**
 * ioat2_alloc_and_lock - common descriptor alloc boilerplate for ioat2,3 ops
 * @idx: gets starting descriptor index on successful allocation
 * @ioat: ioat2,3 channel (ring) to operate on
 * @num_descs: allocation length
 */
static int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs)
{
	struct ioat_chan_common *chan = &ioat->base;

	spin_lock_bh(&ioat->ring_lock);
	/* never allow the last descriptor to be consumed; we need at
	 * least one free at all times to allow for on-the-fly ring
	 * resizing.
	 */
	while (unlikely(ioat2_ring_space(ioat) <= num_descs)) {
		if (reshape_ring(ioat, ioat->alloc_order + 1) &&
		    ioat2_ring_space(ioat) > num_descs)
			break;

		if (printk_ratelimit())
			dev_dbg(to_dev(chan),
				"%s: ring full! num_descs: %d (%x:%x:%x)\n",
				__func__, num_descs, ioat->head, ioat->tail,
				ioat->issued);
		spin_unlock_bh(&ioat->ring_lock);

		/* make forward progress on reclaim; in the allocation
		 * failure case we may be called with bh disabled, so we
		 * need to trigger the timer event directly
		 */
		spin_lock_bh(&chan->cleanup_lock);
		if (time_after(jiffies, chan->timer.expires) &&
		    timer_pending(&chan->timer)) {
			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
			spin_unlock_bh(&chan->cleanup_lock);
			ioat2_timer_event((unsigned long) ioat);
		} else
			spin_unlock_bh(&chan->cleanup_lock);
		return -ENOMEM;
	}

	dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
		__func__, num_descs, ioat->head, ioat->tail, ioat->issued);

	*idx = ioat2_desc_alloc(ioat, num_descs);
	return 0; /* with ioat->ring_lock held */
}

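/*
 * ioat2_dma_prep_memcpy_lock - dmaengine ->device_prep_dma_memcpy hook.
 * Splits 'len' into xfercap-sized chunks (one descriptor each); only
 * the final descriptor carries the interrupt/fence/completion-write
 * flags.  Returns with ring_lock held; it is released in tx_submit.
 */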
static struct dma_async_tx_descriptor *
ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			   dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	dma_addr_t dst = dma_dest;
	dma_addr_t src = dma_src;
	size_t total_len = len;
	int num_descs;
	u16 idx;
	int i;

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	if (likely(num_descs) &&
	    ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0)
		/* pass */;
	else
		return NULL;
	for (i = 0; i < num_descs; i++) {
		size_t copy = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		hw = desc->hw;

		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dst;

		len -= copy;
		dst += copy;
		src += copy;
		dump_desc_dbg(ioat, desc);
	}

	desc->txd.flags = flags;
	desc->len = total_len;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	hw->ctl_f.compl_write = 1;
	dump_desc_dbg(ioat, desc);
	/* we leave the channel locked to ensure in order submission */

	return &desc->txd;
}

/**
 * ioat2_free_chan_resources - release all the descriptors
 * @c: the channel to be cleaned
 */
static void ioat2_free_chan_resources(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *ioatdma_device = chan->device;
	struct ioat_ring_ent *desc;
	const u16 total_descs = 1 << ioat->alloc_order;
	int descs;
	int i;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (!ioat->ring)
		return;

	tasklet_disable(&chan->cleanup_task);
	del_timer_sync(&chan->timer);
	ioat2_cleanup(ioat);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET,
	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	mdelay(100);

	spin_lock_bh(&ioat->ring_lock);
	descs = ioat2_ring_space(ioat);
	dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs);
	for (i = 0; i < descs; i++) {
		desc = ioat2_get_ring_ent(ioat, ioat->head + i);
		ioat2_free_ring_ent(desc, c);
	}

	if (descs < total_descs)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			total_descs - descs);

	for (i = 0; i < total_descs - descs; i++) {
		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
		dump_desc_dbg(ioat, desc);
		ioat2_free_ring_ent(desc, c);
	}

	kfree(ioat->ring);
	ioat->ring = NULL;
	ioat->alloc_order = 0;
	pci_pool_free(ioatdma_device->completion_pool,
		      chan->completion,
		      chan->completion_dma);
	spin_unlock_bh(&ioat->ring_lock);

	chan->last_completion = 0;
	chan->completion_dma = 0;
	ioat->pending = 0;
	ioat->dmacount = 0;
}

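/*
 * ioat2_is_complete - dmaengine ->device_is_tx_complete hook; checks the
 * cached cookie state, running cleanup once before re-checking.
 */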
static enum dma_status
ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
		  dma_cookie_t *done, dma_cookie_t *used)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
		return DMA_SUCCESS;

	ioat2_cleanup(ioat);

	return ioat_is_complete(c, cookie, done, used);
}

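/*
 * ioat2_dma_probe - fill in the version-2 dma_device operations,
 * probe/register the channels, and optionally initialize DCA.
 */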
int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	int err;

	device->enumerate_channels = ioat2_enumerate_channels;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat2_issue_pending;
	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
	dma->device_free_chan_resources = ioat2_free_chan_resources;
	dma->device_is_tx_complete = ioat2_is_complete;

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(2048);

	list_for_each_entry(c, &dma->channels, device_node) {
		chan = to_chan_common(c);
		writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU,
		       chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(device);
	if (err)
		return err;
	if (dca)
		device->dca = ioat2_dca_init(pdev, device->reg_base);

	return err;
}

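/*
 * ioat3_dma_probe - same setup as ioat2_dma_probe, with the version-3
 * errata workarounds applied before probing.
 */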
int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	int err;
	u16 dev_id;

	device->enumerate_channels = ioat2_enumerate_channels;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat2_issue_pending;
	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
	dma->device_free_chan_resources = ioat2_free_chan_resources;
	dma->device_is_tx_complete = ioat2_is_complete;

	/* -= IOAT ver.3 workarounds =- */
	/* Write CHANERRMSK_INT with 3E07h to mask out the errors
	 * that can cause stability issues for IOAT ver.3
	 */
	pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);

	/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
	 * (workaround for spurious config parity error after restart)
	 */
	pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
	if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
		pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(262144);

	list_for_each_entry(c, &dma->channels, device_node) {
		chan = to_chan_common(c);
		writel(IOAT_DMA_DCA_ANY_CPU,
		       chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(device);
	if (err)
		return err;
	if (dca)
		device->dca = ioat3_dca_init(pdev, device->reg_base);

	return err;
}