/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine (versions >= 2), which
 * does asynchronous data movement and checksumming operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "dma_v2.h"
#include "registers.h"
#include "hw.h"

int ioat_ring_alloc_order = 8;
module_param(ioat_ring_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_alloc_order,
		 "ioat2+: allocate 2^n descriptors per channel"
		 " (default: 8 max: 16)");
static int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
module_param(ioat_ring_max_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_max_alloc_order,
		 "ioat2+: upper limit for ring size (default: 16)");

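/* write the accumulated descriptor count to the hardware DMACOUNT
 * register to kick off processing of staged descriptors; caller must
 * hold prep_lock
 */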
void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;

	ioat->dmacount += ioat2_ring_pending(ioat);
	ioat->issued = ioat->head;
	writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
	dev_dbg(to_dev(chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
}

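/* ->device_issue_pending() entry point: flush any staged descriptors
 * out to the hardware under prep_lock
 */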
void ioat2_issue_pending(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	if (ioat2_ring_pending(ioat)) {
		spin_lock_bh(&ioat->prep_lock);
		__ioat2_issue_pending(ioat);
		spin_unlock_bh(&ioat->prep_lock);
	}
}

/**
 * ioat2_update_pending - issue pending descriptors once past the watermark
 * @ioat: ioat2+ channel
 *
 * Check if the number of unsubmitted descriptors has exceeded the
 * watermark.  Called with prep_lock held
 */
static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
{
	if (ioat2_ring_pending(ioat) > ioat_pending_level)
		__ioat2_issue_pending(ioat);
}

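/* post a NULL descriptor at the head of the ring to (re)start the
 * channel; caller must hold prep_lock
 */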
static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;

	if (ioat2_ring_space(ioat) < 1) {
		dev_err(to_dev(&ioat->base),
			"Unable to start null desc - ring full\n");
		return;
	}

	dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);
	desc = ioat2_get_ring_ent(ioat, ioat->head);

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	ioat2_set_chainaddr(ioat, desc->txd.phys);
	dump_desc_dbg(ioat, desc);
	wmb();
	ioat->head += 1;
	__ioat2_issue_pending(ioat);
}

static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
	spin_lock_bh(&ioat->prep_lock);
	__ioat2_start_null_desc(ioat);
	spin_unlock_bh(&ioat->prep_lock);
}

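/* reclaim finished descriptors from tail up to the last completion the
 * hardware has written back, unmapping buffers and running callbacks;
 * caller must hold cleanup_lock
 */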
static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct dma_async_tx_descriptor *tx;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	u16 active;
	int idx = ioat->tail, i;

	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);

	active = ioat2_ring_active(ioat);
	for (i = 0; i < active && !seen_current; i++) {
		smp_read_barrier_depends();
		prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
		desc = ioat2_get_ring_ent(ioat, idx + i);
		tx = &desc->txd;
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
			chan->completed_cookie = tx->cookie;
			tx->cookie = 0;
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys == phys_complete)
			seen_current = true;
	}
	smp_mb(); /* finish all descriptor reads before incrementing tail */
	ioat->tail = idx + i;
	BUG_ON(active && !seen_current); /* no active descs have written a completion? */

	chan->last_completion = phys_complete;
	if (active - i == 0) {
		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
			__func__);
		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
}

/**
 * ioat2_cleanup - clean finished descriptors (advance tail pointer)
 * @ioat: ioat2+ channel to be cleaned up
 */
static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;

	spin_lock_bh(&chan->cleanup_lock);
	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);
	spin_unlock_bh(&chan->cleanup_lock);
}

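/* cleanup tasklet body: reap completed descriptors and re-arm the
 * channel interrupt
 */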
void ioat2_cleanup_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);

	ioat2_cleanup(ioat);
	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

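/* resume a channel from its tail descriptor after reset/quiesce, or
 * kick it with a NULL descriptor when nothing is pending
 */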
void __ioat2_restart_chan(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;

	/* set the tail to be re-issued */
	ioat->issued = ioat->tail;
	ioat->dmacount = 0;
	set_bit(IOAT_COMPLETION_PENDING, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	dev_dbg(to_dev(chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);

	if (ioat2_ring_pending(ioat)) {
		struct ioat_ring_ent *desc;

		desc = ioat2_get_ring_ent(ioat, ioat->tail);
		ioat2_set_chainaddr(ioat, desc->txd.phys);
		__ioat2_issue_pending(ioat);
	} else
		__ioat2_start_null_desc(ioat);
}

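/* suspend the channel and wait up to @tmo jiffies (0 == wait forever)
 * for it to go quiescent
 */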
int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;
	u32 status;

	status = ioat_chansts(chan);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(chan);
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		if (tmo && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		status = ioat_chansts(chan);
		cpu_relax();
	}

	return err;
}

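/* trigger a channel reset and synchronously wait for it to complete */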
int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;

	ioat_reset(chan);
	while (ioat_reset_pending(chan)) {
		if (end && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		cpu_relax();
	}

	return err;
}

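/* quiesce the channel, reclaim whatever did complete, then restart
 * from the tail
 */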
static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;

	ioat2_quiesce(chan, 0);
	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	__ioat2_restart_chan(ioat);
}

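/* channel watchdog: recover halted or stalled channels, and shrink an
 * idle ring back toward its default size
 */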
void ioat2_timer_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;

	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
		unsigned long phys_complete;
		u64 status;

		status = ioat_chansts(chan);

		/* when halted due to errors check for channel
		 * programming errors before advancing the completion state
		 */
		if (is_ioat_halted(status)) {
			u32 chanerr;

			chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
			dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
				__func__, chanerr);
			if (test_bit(IOAT_RUN, &chan->state))
				BUG_ON(is_ioat_bug(chanerr));
			else /* we never got off the ground */
				return;
		}

		/* if we haven't made progress and we have already
		 * acknowledged a pending completion once, then be more
		 * forceful with a restart
		 */
		spin_lock_bh(&chan->cleanup_lock);
		if (ioat_cleanup_preamble(chan, &phys_complete)) {
			__cleanup(ioat, phys_complete);
		} else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
			spin_lock_bh(&ioat->prep_lock);
			ioat2_restart_channel(ioat);
			spin_unlock_bh(&ioat->prep_lock);
		} else {
			set_bit(IOAT_COMPLETION_ACK, &chan->state);
			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		}
		spin_unlock_bh(&chan->cleanup_lock);
	} else {
		u16 active;

		/* if the ring is idle, empty, and oversized try to step
		 * down the size
		 */
		spin_lock_bh(&chan->cleanup_lock);
		spin_lock_bh(&ioat->prep_lock);
		active = ioat2_ring_active(ioat);
		if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
			reshape_ring(ioat, ioat->alloc_order - 1);
		spin_unlock_bh(&ioat->prep_lock);
		spin_unlock_bh(&chan->cleanup_lock);

		/* keep shrinking until we get back to our minimum
		 * default size
		 */
		if (ioat->alloc_order > ioat_get_alloc_order())
			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
}

static int ioat2_reset_hw(struct ioat_chan_common *chan)
{
	/* throw away whatever the channel was doing and get it initialized */
	u32 chanerr;

	ioat2_quiesce(chan, msecs_to_jiffies(100));

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);

	return ioat2_reset_sync(chan, msecs_to_jiffies(200));
}

/**
 * ioat2_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
int ioat2_enumerate_channels(struct ioatdma_device *device)
{
	struct ioat2_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;
	u8 xfercap_log;
	int i;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(device->idx));
		dma->chancnt = ARRAY_SIZE(device->idx);
	}
	xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_log &= 0x1f; /* bits [4:0] valid */
	if (xfercap_log == 0)
		return 0;
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);

	/* FIXME which i/oat version is i7300? */
#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i);
		ioat->xfercap_log = xfercap_log;
		spin_lock_init(&ioat->prep_lock);
		if (device->reset_hw(&ioat->base)) {
			i = 0;
			break;
		}
	}
	dma->chancnt = i;
	return i;
}

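/* ->tx_submit() callback: assign the cookie, publish the new
 * descriptors by advancing head, and release the prep_lock taken in
 * ioat2_check_space_lock()
 */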
static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	dma_cookie_t cookie = c->cookie;

	cookie++;
	if (cookie < 0)
		cookie = 1;
	tx->cookie = cookie;
	c->cookie = cookie;
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	/* make descriptor updates visible before advancing ioat->head,
	 * this is purposefully not smp_wmb() since we are also
	 * publishing the descriptor updates to a dma device
	 */
	wmb();

	ioat->head += ioat->produce;

	ioat2_update_pending(ioat);
	spin_unlock_bh(&ioat->prep_lock);

	return cookie;
}

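/* allocate one software ring entry together with its hardware descriptor */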
static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
{
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	struct ioatdma_device *dma;
	dma_addr_t phys;

	dma = to_ioatdma_device(chan->device);
	hw = pci_pool_alloc(dma->dma_pool, flags, &phys);
	if (!hw)
		return NULL;
	memset(hw, 0, sizeof(*hw));

	desc = kmem_cache_alloc(ioat2_cache, flags);
	if (!desc) {
		pci_pool_free(dma->dma_pool, hw, phys);
		return NULL;
	}
	memset(desc, 0, sizeof(*desc));

	dma_async_tx_descriptor_init(&desc->txd, chan);
	desc->txd.tx_submit = ioat2_tx_submit_unlock;
	desc->hw = hw;
	desc->txd.phys = phys;
	return desc;
}

static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
	struct ioatdma_device *dma;

	dma = to_ioatdma_device(chan->device);
	pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys);
	kmem_cache_free(ioat2_cache, desc);
}

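/* allocate a software ring of 2^order entries and chain the hardware
 * descriptors into a circle
 */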
static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
	struct ioat_ring_ent **ring;
	int descs = 1 << order;
	int i;

	if (order > ioat_get_max_alloc_order())
		return NULL;

	/* allocate the array to hold the software ring */
	ring = kcalloc(descs, sizeof(*ring), flags);
	if (!ring)
		return NULL;
	for (i = 0; i < descs; i++) {
		ring[i] = ioat2_alloc_ring_ent(c, flags);
		if (!ring[i]) {
			while (i--)
				ioat2_free_ring_ent(ring[i], c);
			kfree(ring);
			return NULL;
		}
		set_desc_id(ring[i], i);
	}

	/* link descs */
	for (i = 0; i < descs-1; i++) {
		struct ioat_ring_ent *next = ring[i+1];
		struct ioat_dma_descriptor *hw = ring[i]->hw;

		hw->next = next->txd.phys;
	}
	ring[i]->hw->next = ring[0]->txd.phys;

	return ring;
}

void ioat2_free_chan_resources(struct dma_chan *c);

/* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring
 * @c: channel to be initialized
 */
int ioat2_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_ring_ent **ring;
	u64 status;
	int order;

	/* have we already been set up? */
	if (ioat->ring)
		return 1 << ioat->alloc_order;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion = pci_pool_alloc(chan->device->completion_pool,
					  GFP_KERNEL, &chan->completion_dma);
	if (!chan->completion)
		return -ENOMEM;

	memset(chan->completion, 0, sizeof(*chan->completion));
	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_dma) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	order = ioat_get_alloc_order();
	ring = ioat2_alloc_ring(c, order, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	spin_lock_bh(&chan->cleanup_lock);
	spin_lock_bh(&ioat->prep_lock);
	ioat->ring = ring;
	ioat->head = 0;
	ioat->issued = 0;
	ioat->tail = 0;
	ioat->alloc_order = order;
	spin_unlock_bh(&ioat->prep_lock);
	spin_unlock_bh(&chan->cleanup_lock);

	tasklet_enable(&chan->cleanup_task);
	ioat2_start_null_desc(ioat);

	/* check that we got off the ground */
	udelay(5);
	status = ioat_chansts(chan);
	if (is_ioat_active(status) || is_ioat_idle(status)) {
		set_bit(IOAT_RUN, &chan->state);
		return 1 << ioat->alloc_order;
	} else {
		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);

		dev_WARN(to_dev(chan),
			 "failed to start channel chanerr: %#x\n", chanerr);
		ioat2_free_chan_resources(c);
		return -EFAULT;
	}
}

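/* grow or shrink the ring to 2^order entries; called with cleanup_lock
 * and prep_lock held
 */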
bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
{
	/* reshape differs from normal ring allocation in that we want
	 * to allocate a new software ring while only
	 * extending/truncating the hardware ring
	 */
	struct ioat_chan_common *chan = &ioat->base;
	struct dma_chan *c = &chan->common;
	const u16 curr_size = ioat2_ring_size(ioat);
	const u16 active = ioat2_ring_active(ioat);
	const u16 new_size = 1 << order;
	struct ioat_ring_ent **ring;
	u16 i;

	if (order > ioat_get_max_alloc_order())
		return false;

	/* double check that we have at least 1 free descriptor */
	if (active == curr_size)
		return false;

	/* when shrinking, verify that we can hold the current active
	 * set in the new ring
	 */
	if (active >= new_size)
		return false;

	/* allocate the array to hold the software ring */
	ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
	if (!ring)
		return false;

	/* allocate/trim descriptors as needed */
	if (new_size > curr_size) {
		/* copy current descriptors to the new ring */
		for (i = 0; i < curr_size; i++) {
			u16 curr_idx = (ioat->tail+i) & (curr_size-1);
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat->ring[curr_idx];
			set_desc_id(ring[new_idx], new_idx);
		}

		/* add new descriptors to the ring */
		for (i = curr_size; i < new_size; i++) {
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat2_alloc_ring_ent(c, GFP_NOWAIT);
			if (!ring[new_idx]) {
				while (i--) {
					u16 new_idx = (ioat->tail+i) & (new_size-1);

					ioat2_free_ring_ent(ring[new_idx], c);
				}
				kfree(ring);
				return false;
			}
			set_desc_id(ring[new_idx], new_idx);
		}

		/* hw link new descriptors */
		for (i = curr_size-1; i < new_size; i++) {
			u16 new_idx = (ioat->tail+i) & (new_size-1);
			struct ioat_ring_ent *next = ring[(new_idx+1) & (new_size-1)];
			struct ioat_dma_descriptor *hw = ring[new_idx]->hw;

			hw->next = next->txd.phys;
		}
	} else {
		struct ioat_dma_descriptor *hw;
		struct ioat_ring_ent *next;

		/* copy current descriptors to the new ring, dropping the
		 * removed descriptors
		 */
		for (i = 0; i < new_size; i++) {
			u16 curr_idx = (ioat->tail+i) & (curr_size-1);
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat->ring[curr_idx];
			set_desc_id(ring[new_idx], new_idx);
		}

		/* free deleted descriptors */
		for (i = new_size; i < curr_size; i++) {
			struct ioat_ring_ent *ent;

			ent = ioat2_get_ring_ent(ioat, ioat->tail+i);
			ioat2_free_ring_ent(ent, c);
		}

		/* fix up hardware ring */
		hw = ring[(ioat->tail+new_size-1) & (new_size-1)]->hw;
		next = ring[(ioat->tail+new_size) & (new_size-1)];
		hw->next = next->txd.phys;
	}

	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
		__func__, new_size);

	kfree(ioat->ring);
	ioat->ring = ring;
	ioat->alloc_order = order;

	return true;
}

/**
 * ioat2_check_space_lock - verify space and grab ring producer lock
 * @ioat: ioat2,3 channel (ring) to operate on
 * @num_descs: allocation length
 */
int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs)
{
	struct ioat_chan_common *chan = &ioat->base;
	bool retry;

 retry:
	spin_lock_bh(&ioat->prep_lock);
	/* never allow the last descriptor to be consumed, we need at
	 * least one free at all times to allow for on-the-fly ring
	 * resizing.
	 */
	if (likely(ioat2_ring_space(ioat) > num_descs)) {
		dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
			__func__, num_descs, ioat->head, ioat->tail, ioat->issued);
		ioat->produce = num_descs;
		return 0;  /* with ioat->prep_lock held */
	}
	retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &chan->state);
	spin_unlock_bh(&ioat->prep_lock);

	/* is another cpu already trying to expand the ring? */
	if (retry)
		goto retry;

	spin_lock_bh(&chan->cleanup_lock);
	spin_lock_bh(&ioat->prep_lock);
	retry = reshape_ring(ioat, ioat->alloc_order + 1);
	clear_bit(IOAT_RESHAPE_PENDING, &chan->state);
	spin_unlock_bh(&ioat->prep_lock);
	spin_unlock_bh(&chan->cleanup_lock);

	/* if we were able to expand the ring retry the allocation */
	if (retry)
		goto retry;

	if (printk_ratelimit())
		dev_dbg(to_dev(chan), "%s: ring full! num_descs: %d (%x:%x:%x)\n",
			__func__, num_descs, ioat->head, ioat->tail, ioat->issued);

	/* progress reclaim in the allocation failure case; we may be
	 * called under bh_disabled so we need to trigger the timer
	 * event directly
	 */
	if (jiffies > chan->timer.expires && timer_pending(&chan->timer)) {
		struct ioatdma_device *device = chan->device;

		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		device->timer_fn((unsigned long) &chan->common);
	}

	return -ENOMEM;
}

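/* prepare a memcpy operation, splitting the transfer across as many
 * descriptors as xfercap requires; returns with prep_lock held so
 * submission stays in order
 */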
struct dma_async_tx_descriptor *
ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			   dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	dma_addr_t dst = dma_dest;
	dma_addr_t src = dma_src;
	size_t total_len = len;
	int num_descs, idx, i;

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
		idx = ioat->head;
	else
		return NULL;
	i = 0;
	do {
		size_t copy = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		hw = desc->hw;

		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dst;

		len -= copy;
		dst += copy;
		src += copy;
		dump_desc_dbg(ioat, desc);
	} while (++i < num_descs);

	desc->txd.flags = flags;
	desc->len = total_len;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	hw->ctl_f.compl_write = 1;
	dump_desc_dbg(ioat, desc);
	/* we leave the channel locked to ensure in order submission */

	return &desc->txd;
}

/**
 * ioat2_free_chan_resources - release all the descriptors
 * @c: the channel to be cleaned
 */
void ioat2_free_chan_resources(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *device = chan->device;
	struct ioat_ring_ent *desc;
	const u16 total_descs = 1 << ioat->alloc_order;
	int descs;
	int i;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (!ioat->ring)
		return;

	tasklet_disable(&chan->cleanup_task);
	del_timer_sync(&chan->timer);
	device->cleanup_fn((unsigned long) c);
	device->reset_hw(chan);
	clear_bit(IOAT_RUN, &chan->state);

	spin_lock_bh(&chan->cleanup_lock);
	spin_lock_bh(&ioat->prep_lock);
	descs = ioat2_ring_space(ioat);
	dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs);
	for (i = 0; i < descs; i++) {
		desc = ioat2_get_ring_ent(ioat, ioat->head + i);
		ioat2_free_ring_ent(desc, c);
	}

	if (descs < total_descs)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			total_descs - descs);

	for (i = 0; i < total_descs - descs; i++) {
		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
		dump_desc_dbg(ioat, desc);
		ioat2_free_ring_ent(desc, c);
	}

	kfree(ioat->ring);
	ioat->ring = NULL;
	ioat->alloc_order = 0;
	pci_pool_free(device->completion_pool, chan->completion,
		      chan->completion_dma);
	spin_unlock_bh(&ioat->prep_lock);
	spin_unlock_bh(&chan->cleanup_lock);

	chan->last_completion = 0;
	chan->completion_dma = 0;
	ioat->dmacount = 0;
}

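/* sysfs attributes exporting ring size and occupancy */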
static ssize_t ring_size_show(struct dma_chan *c, char *page)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	return sprintf(page, "%d\n", (1 << ioat->alloc_order) & ~1);
}
static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);

static ssize_t ring_active_show(struct dma_chan *c, char *page)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	/* ...taken outside the lock, no need to be precise */
	return sprintf(page, "%d\n", ioat2_ring_active(ioat));
}
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);

static struct attribute *ioat2_attrs[] = {
	&ring_size_attr.attr,
	&ring_active_attr.attr,
	&ioat_cap_attr.attr,
	&ioat_version_attr.attr,
	NULL,
};

struct kobj_type ioat2_ktype = {
	.sysfs_ops = &ioat_sysfs_ops,
	.default_attrs = ioat2_attrs,
};

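/* ioat2_dma_probe - hook up the version 2 channel operations and
 * register with dmaengine
 */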
int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	int err;

	device->enumerate_channels = ioat2_enumerate_channels;
	device->reset_hw = ioat2_reset_hw;
	device->cleanup_fn = ioat2_cleanup_event;
	device->timer_fn = ioat2_timer_event;
	device->self_test = ioat_dma_self_test;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat2_issue_pending;
	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
	dma->device_free_chan_resources = ioat2_free_chan_resources;
	dma->device_tx_status = ioat_tx_status;

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(2048);

	list_for_each_entry(c, &dma->channels, device_node) {
		chan = to_chan_common(c);
		writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU,
		       chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(device);
	if (err)
		return err;

	ioat_kobject_add(device, &ioat2_ktype);

	if (dca)
		device->dca = ioat2_dca_init(pdev, device->reg_base);

	return err;
}