/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * BSD LICENSE
 *
 * Copyright(c) 2004-2009 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
54 | ||
55 | /* | |
56 | * Support routines for v3+ hardware | |
57 | */ | |
58 | ||
59 | #include <linux/pci.h> | |
5a0e3ad6 | 60 | #include <linux/gfp.h> |
bf40a686 DW |
61 | #include <linux/dmaengine.h> |
62 | #include <linux/dma-mapping.h> | |
63 | #include "registers.h" | |
64 | #include "hw.h" | |
65 | #include "dma.h" | |
66 | #include "dma_v2.h" | |
67 | ||
b094ad3b DW |
68 | /* ioat hardware assumes at least two sources for raid operations */ |
69 | #define src_cnt_to_sw(x) ((x) + 2) | |
70 | #define src_cnt_to_hw(x) ((x) - 2) | |
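/*
 * Example: the descriptor's src_cnt field stores "number of sources minus
 * two", so an eight-source xor is programmed as src_cnt_to_hw(8) == 6 and
 * decoded again with src_cnt_to_sw(6) == 8.
 */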
71 | ||
72 | /* provide a lookup table for setting the source address in the base or | |
d69d235b | 73 | * extended descriptor of an xor or pq descriptor |
b094ad3b DW |
74 | */ |
75 | static const u8 xor_idx_to_desc __read_mostly = 0xd0; | |
76 | static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 }; | |
d69d235b DW |
77 | static const u8 pq_idx_to_desc __read_mostly = 0xf8; |
78 | static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 }; | |
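/*
 * The *_idx_to_desc bytes are bitmaps: bit 'idx' selects whether source
 * 'idx' lives in the base descriptor (0) or the extended descriptor (1),
 * and *_idx_to_field[idx] gives the u64 slot within that descriptor.  For
 * example, xor source 6 is bit 6 of 0xe0 (set, so the extended descriptor)
 * at field 1, while pq source 2 is bit 2 of 0xf8 (clear, so the base
 * descriptor) at field 5.
 */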

static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];

	return raw->field[xor_idx_to_field[idx]];
}

static void xor_set_src(struct ioat_raw_descriptor *descs[2],
			dma_addr_t addr, u32 offset, int idx)
{
	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];

	raw->field[xor_idx_to_field[idx]] = addr + offset;
}

static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

	return raw->field[pq_idx_to_field[idx]];
}

static void pq_set_src(struct ioat_raw_descriptor *descs[2],
		       dma_addr_t addr, u32 offset, u8 coef, int idx)
{
	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

	raw->field[pq_idx_to_field[idx]] = addr + offset;
	pq->coef[idx] = coef;
}

static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat,
			    struct ioat_ring_ent *desc, int idx)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct pci_dev *pdev = chan->device->pdev;
	size_t len = desc->len;
	size_t offset = len - desc->hw->size;
	struct dma_async_tx_descriptor *tx = &desc->txd;
	enum dma_ctrl_flags flags = tx->flags;

	switch (desc->hw->ctl_f.op) {
	case IOAT_OP_COPY:
		if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */
			ioat_dma_unmap(chan, flags, len, desc->hw);
		break;
	case IOAT_OP_FILL: {
		struct ioat_fill_descriptor *hw = desc->fill;

		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
			ioat_unmap(pdev, hw->dst_addr - offset, len,
				   PCI_DMA_FROMDEVICE, flags, 1);
		break;
	}
	case IOAT_OP_XOR_VAL:
	case IOAT_OP_XOR: {
		struct ioat_xor_descriptor *xor = desc->xor;
		struct ioat_ring_ent *ext;
		struct ioat_xor_ext_descriptor *xor_ex = NULL;
		int src_cnt = src_cnt_to_sw(xor->ctl_f.src_cnt);
		struct ioat_raw_descriptor *descs[2];
		int i;

		if (src_cnt > 5) {
			ext = ioat2_get_ring_ent(ioat, idx + 1);
			xor_ex = ext->xor_ex;
		}

		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			descs[0] = (struct ioat_raw_descriptor *) xor;
			descs[1] = (struct ioat_raw_descriptor *) xor_ex;
			for (i = 0; i < src_cnt; i++) {
				dma_addr_t src = xor_get_src(descs, i);

				ioat_unmap(pdev, src - offset, len,
					   PCI_DMA_TODEVICE, flags, 0);
			}

			/* dest is a source in xor validate operations */
			if (xor->ctl_f.op == IOAT_OP_XOR_VAL) {
				ioat_unmap(pdev, xor->dst_addr - offset, len,
					   PCI_DMA_TODEVICE, flags, 1);
				break;
			}
		}

		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
			ioat_unmap(pdev, xor->dst_addr - offset, len,
				   PCI_DMA_FROMDEVICE, flags, 1);
		break;
	}
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ: {
		struct ioat_pq_descriptor *pq = desc->pq;
		struct ioat_ring_ent *ext;
		struct ioat_pq_ext_descriptor *pq_ex = NULL;
		int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
		struct ioat_raw_descriptor *descs[2];
		int i;

		if (src_cnt > 3) {
			ext = ioat2_get_ring_ent(ioat, idx + 1);
			pq_ex = ext->pq_ex;
		}

		/* in the 'continue' case don't unmap the dests as sources */
		if (dmaf_p_disabled_continue(flags))
			src_cnt--;
		else if (dmaf_continue(flags))
			src_cnt -= 3;

		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			descs[0] = (struct ioat_raw_descriptor *) pq;
			descs[1] = (struct ioat_raw_descriptor *) pq_ex;
			for (i = 0; i < src_cnt; i++) {
				dma_addr_t src = pq_get_src(descs, i);

				ioat_unmap(pdev, src - offset, len,
					   PCI_DMA_TODEVICE, flags, 0);
			}

			/* the dests are sources in pq validate operations */
			if (pq->ctl_f.op == IOAT_OP_XOR_VAL) {
				if (!(flags & DMA_PREP_PQ_DISABLE_P))
					ioat_unmap(pdev, pq->p_addr - offset,
						   len, PCI_DMA_TODEVICE, flags, 0);
				if (!(flags & DMA_PREP_PQ_DISABLE_Q))
					ioat_unmap(pdev, pq->q_addr - offset,
						   len, PCI_DMA_TODEVICE, flags, 0);
				break;
			}
		}

		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (!(flags & DMA_PREP_PQ_DISABLE_P))
				ioat_unmap(pdev, pq->p_addr - offset, len,
					   PCI_DMA_BIDIRECTIONAL, flags, 1);
			if (!(flags & DMA_PREP_PQ_DISABLE_Q))
				ioat_unmap(pdev, pq->q_addr - offset, len,
					   PCI_DMA_BIDIRECTIONAL, flags, 1);
		}
		break;
	}
	default:
		dev_err(&pdev->dev, "%s: unknown op type: %#x\n",
			__func__, desc->hw->ctl_f.op);
	}
}

static bool desc_has_ext(struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	if (hw->ctl_f.op == IOAT_OP_XOR ||
	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
		struct ioat_xor_descriptor *xor = desc->xor;

		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
			return true;
	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
		struct ioat_pq_descriptor *pq = desc->pq;

		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
			return true;
	}

	return false;
}

/**
 * __cleanup - reclaim used descriptors
 * @ioat: channel (ring) to clean
 *
 * The difference from the dma_v2.c __cleanup() is that this routine
 * handles extended descriptors and dma-unmapping raid operations.
 */
static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	u16 active;
	int i;

	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);

	active = ioat2_ring_active(ioat);
	for (i = 0; i < active && !seen_current; i++) {
		struct dma_async_tx_descriptor *tx;

		prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1));
		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
		dump_desc_dbg(ioat, desc);
		tx = &desc->txd;
		if (tx->cookie) {
			chan->completed_cookie = tx->cookie;
			ioat3_dma_unmap(ioat, desc, ioat->tail + i);
			tx->cookie = 0;
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys == phys_complete)
			seen_current = true;

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			BUG_ON(i + 1 >= active);
			i++;
		}
	}
	ioat->tail += i;
	BUG_ON(active && !seen_current); /* no active descs have written a completion? */
	chan->last_completion = phys_complete;

	active = ioat2_ring_active(ioat);
	if (active == 0) {
		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
			__func__);
		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
	/* 5 microsecond delay per pending descriptor */
	writew(min((5 * active), IOAT_INTRDELAY_MASK),
	       chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
}
311 | ||
b9cc9869 DW |
312 | /* try to cleanup, but yield (via spin_trylock) to incoming submissions |
313 | * with the expectation that we will immediately poll again shortly | |
314 | */ | |
315 | static void ioat3_cleanup_poll(struct ioat2_dma_chan *ioat) | |
bf40a686 DW |
316 | { |
317 | struct ioat_chan_common *chan = &ioat->base; | |
318 | unsigned long phys_complete; | |
319 | ||
320 | prefetch(chan->completion); | |
321 | ||
322 | if (!spin_trylock_bh(&chan->cleanup_lock)) | |
323 | return; | |
324 | ||
325 | if (!ioat_cleanup_preamble(chan, &phys_complete)) { | |
326 | spin_unlock_bh(&chan->cleanup_lock); | |
327 | return; | |
328 | } | |
329 | ||
330 | if (!spin_trylock_bh(&ioat->ring_lock)) { | |
331 | spin_unlock_bh(&chan->cleanup_lock); | |
332 | return; | |
333 | } | |
334 | ||
335 | __cleanup(ioat, phys_complete); | |
336 | ||
337 | spin_unlock_bh(&ioat->ring_lock); | |
338 | spin_unlock_bh(&chan->cleanup_lock); | |
339 | } | |
340 | ||
b9cc9869 DW |
341 | /* run cleanup now because we already delayed the interrupt via INTRDELAY */ |
342 | static void ioat3_cleanup_sync(struct ioat2_dma_chan *ioat) | |
343 | { | |
344 | struct ioat_chan_common *chan = &ioat->base; | |
345 | unsigned long phys_complete; | |
346 | ||
347 | prefetch(chan->completion); | |
348 | ||
349 | spin_lock_bh(&chan->cleanup_lock); | |
350 | if (!ioat_cleanup_preamble(chan, &phys_complete)) { | |
351 | spin_unlock_bh(&chan->cleanup_lock); | |
352 | return; | |
353 | } | |
354 | spin_lock_bh(&ioat->ring_lock); | |
355 | ||
356 | __cleanup(ioat, phys_complete); | |
357 | ||
358 | spin_unlock_bh(&ioat->ring_lock); | |
359 | spin_unlock_bh(&chan->cleanup_lock); | |
360 | } | |
361 | ||
aa4d72ae | 362 | static void ioat3_cleanup_event(unsigned long data) |
bf40a686 | 363 | { |
aa4d72ae | 364 | struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); |
bf40a686 | 365 | |
b9cc9869 | 366 | ioat3_cleanup_sync(ioat); |
773d9e2d | 367 | writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); |
bf40a686 DW |
368 | } |
369 | ||
370 | static void ioat3_restart_channel(struct ioat2_dma_chan *ioat) | |
371 | { | |
372 | struct ioat_chan_common *chan = &ioat->base; | |
373 | unsigned long phys_complete; | |
bf40a686 | 374 | |
b372ec2d | 375 | ioat2_quiesce(chan, 0); |
bf40a686 DW |
376 | if (ioat_cleanup_preamble(chan, &phys_complete)) |
377 | __cleanup(ioat, phys_complete); | |
378 | ||
379 | __ioat2_restart_chan(ioat); | |
380 | } | |
381 | ||
382 | static void ioat3_timer_event(unsigned long data) | |
383 | { | |
aa4d72ae | 384 | struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); |
bf40a686 DW |
385 | struct ioat_chan_common *chan = &ioat->base; |
386 | ||
387 | spin_lock_bh(&chan->cleanup_lock); | |
388 | if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { | |
389 | unsigned long phys_complete; | |
390 | u64 status; | |
391 | ||
392 | spin_lock_bh(&ioat->ring_lock); | |
393 | status = ioat_chansts(chan); | |
394 | ||
395 | /* when halted due to errors check for channel | |
396 | * programming errors before advancing the completion state | |
397 | */ | |
398 | if (is_ioat_halted(status)) { | |
399 | u32 chanerr; | |
400 | ||
401 | chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); | |
b57014de DW |
402 | dev_err(to_dev(chan), "%s: Channel halted (%x)\n", |
403 | __func__, chanerr); | |
bf40a686 DW |
404 | BUG_ON(is_ioat_bug(chanerr)); |
405 | } | |
406 | ||
407 | /* if we haven't made progress and we have already | |
408 | * acknowledged a pending completion once, then be more | |
409 | * forceful with a restart | |
410 | */ | |
411 | if (ioat_cleanup_preamble(chan, &phys_complete)) | |
412 | __cleanup(ioat, phys_complete); | |
413 | else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) | |
414 | ioat3_restart_channel(ioat); | |
415 | else { | |
416 | set_bit(IOAT_COMPLETION_ACK, &chan->state); | |
417 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | |
418 | } | |
419 | spin_unlock_bh(&ioat->ring_lock); | |
420 | } else { | |
421 | u16 active; | |
422 | ||
423 | /* if the ring is idle, empty, and oversized try to step | |
424 | * down the size | |
425 | */ | |
426 | spin_lock_bh(&ioat->ring_lock); | |
427 | active = ioat2_ring_active(ioat); | |
428 | if (active == 0 && ioat->alloc_order > ioat_get_alloc_order()) | |
429 | reshape_ring(ioat, ioat->alloc_order-1); | |
430 | spin_unlock_bh(&ioat->ring_lock); | |
431 | ||
432 | /* keep shrinking until we get back to our minimum | |
433 | * default size | |
434 | */ | |
435 | if (ioat->alloc_order > ioat_get_alloc_order()) | |
436 | mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); | |
437 | } | |
438 | spin_unlock_bh(&chan->cleanup_lock); | |
439 | } | |
440 | ||
441 | static enum dma_status | |
442 | ioat3_is_complete(struct dma_chan *c, dma_cookie_t cookie, | |
443 | dma_cookie_t *done, dma_cookie_t *used) | |
444 | { | |
445 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); | |
446 | ||
447 | if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) | |
448 | return DMA_SUCCESS; | |
449 | ||
b9cc9869 | 450 | ioat3_cleanup_poll(ioat); |
bf40a686 DW |
451 | |
452 | return ioat_is_complete(c, cookie, done, used); | |
453 | } | |
454 | ||
455 | static struct dma_async_tx_descriptor * | |
456 | ioat3_prep_memset_lock(struct dma_chan *c, dma_addr_t dest, int value, | |
457 | size_t len, unsigned long flags) | |
458 | { | |
459 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); | |
460 | struct ioat_ring_ent *desc; | |
461 | size_t total_len = len; | |
462 | struct ioat_fill_descriptor *fill; | |
463 | int num_descs; | |
464 | u64 src_data = (0x0101010101010101ULL) * (value & 0xff); | |
465 | u16 idx; | |
466 | int i; | |
467 | ||
468 | num_descs = ioat2_xferlen_to_descs(ioat, len); | |
469 | if (likely(num_descs) && | |
470 | ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0) | |
471 | /* pass */; | |
472 | else | |
473 | return NULL; | |
cdef57db DW |
474 | i = 0; |
475 | do { | |
bf40a686 DW |
476 | size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log); |
477 | ||
478 | desc = ioat2_get_ring_ent(ioat, idx + i); | |
479 | fill = desc->fill; | |
480 | ||
481 | fill->size = xfer_size; | |
482 | fill->src_data = src_data; | |
483 | fill->dst_addr = dest; | |
484 | fill->ctl = 0; | |
485 | fill->ctl_f.op = IOAT_OP_FILL; | |
486 | ||
487 | len -= xfer_size; | |
488 | dest += xfer_size; | |
489 | dump_desc_dbg(ioat, desc); | |
cdef57db | 490 | } while (++i < num_descs); |
bf40a686 DW |
491 | |
492 | desc->txd.flags = flags; | |
493 | desc->len = total_len; | |
494 | fill->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); | |
495 | fill->ctl_f.fence = !!(flags & DMA_PREP_FENCE); | |
496 | fill->ctl_f.compl_write = 1; | |
497 | dump_desc_dbg(ioat, desc); | |
498 | ||
499 | /* we leave the channel locked to ensure in order submission */ | |
500 | return &desc->txd; | |
501 | } | |
502 | ||
b094ad3b DW |
503 | static struct dma_async_tx_descriptor * |
504 | __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result, | |
505 | dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt, | |
506 | size_t len, unsigned long flags) | |
507 | { | |
508 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); | |
509 | struct ioat_ring_ent *compl_desc; | |
510 | struct ioat_ring_ent *desc; | |
511 | struct ioat_ring_ent *ext; | |
512 | size_t total_len = len; | |
513 | struct ioat_xor_descriptor *xor; | |
514 | struct ioat_xor_ext_descriptor *xor_ex = NULL; | |
515 | struct ioat_dma_descriptor *hw; | |
516 | u32 offset = 0; | |
517 | int num_descs; | |
518 | int with_ext; | |
519 | int i; | |
520 | u16 idx; | |
521 | u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR; | |
522 | ||
523 | BUG_ON(src_cnt < 2); | |
524 | ||
525 | num_descs = ioat2_xferlen_to_descs(ioat, len); | |
526 | /* we need 2x the number of descriptors to cover greater than 5 | |
527 | * sources | |
528 | */ | |
529 | if (src_cnt > 5) { | |
530 | with_ext = 1; | |
531 | num_descs *= 2; | |
532 | } else | |
533 | with_ext = 0; | |
534 | ||
	/* completion writes from the raid engine may pass completion
	 * writes from the legacy engine, so we need one extra null
	 * (legacy) descriptor to ensure all completion writes arrive in
	 * order.
	 */
	if (likely(num_descs) &&
	    ioat2_alloc_and_lock(&idx, ioat, num_descs+1) == 0)
		/* pass */;
	else
		return NULL;
	i = 0;
	do {
		struct ioat_raw_descriptor *descs[2];
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
		int s;

		desc = ioat2_get_ring_ent(ioat, idx + i);
		xor = desc->xor;

		/* save a branch by unconditionally retrieving the
		 * extended descriptor xor_set_src() knows to not write
		 * to it in the single descriptor case
		 */
		ext = ioat2_get_ring_ent(ioat, idx + i + 1);
		xor_ex = ext->xor_ex;

		descs[0] = (struct ioat_raw_descriptor *) xor;
		descs[1] = (struct ioat_raw_descriptor *) xor_ex;
		for (s = 0; s < src_cnt; s++)
			xor_set_src(descs, src[s], offset, s);
		xor->size = xfer_size;
		xor->dst_addr = dest + offset;
		xor->ctl = 0;
		xor->ctl_f.op = op;
		xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt);

		len -= xfer_size;
		offset += xfer_size;
		dump_desc_dbg(ioat, desc);
	} while ((i += 1 + with_ext) < num_descs);

	/* last xor descriptor carries the unmap parameters and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE);

	/* completion descriptor carries interrupt bit */
	compl_desc = ioat2_get_ring_ent(ioat, idx + i);
	compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
	hw = compl_desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->size = NULL_DESC_BUFFER_SIZE;
	dump_desc_dbg(ioat, compl_desc);

	/* we leave the channel locked to ensure in order submission */
	return &compl_desc->txd;
}

static struct dma_async_tx_descriptor *
ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
	       unsigned int src_cnt, size_t len, unsigned long flags)
{
	return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
}

struct dma_async_tx_descriptor *
ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
		   unsigned int src_cnt, size_t len,
		   enum sum_check_flags *result, unsigned long flags)
{
	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*result = 0;

	return __ioat3_prep_xor_lock(chan, result, src[0], &src[1],
				     src_cnt - 1, len, flags);
}
618 | ||
d69d235b DW |
619 | static void |
620 | dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct ioat_ring_ent *ext) | |
621 | { | |
622 | struct device *dev = to_dev(&ioat->base); | |
623 | struct ioat_pq_descriptor *pq = desc->pq; | |
624 | struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL; | |
625 | struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex }; | |
626 | int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt); | |
627 | int i; | |
628 | ||
629 | dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x" | |
630 | " sz: %#x ctl: %#x (op: %d int: %d compl: %d pq: '%s%s' src_cnt: %d)\n", | |
631 | desc_id(desc), (unsigned long long) desc->txd.phys, | |
632 | (unsigned long long) (pq_ex ? pq_ex->next : pq->next), | |
633 | desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en, | |
634 | pq->ctl_f.compl_write, | |
635 | pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q", | |
636 | pq->ctl_f.src_cnt); | |
637 | for (i = 0; i < src_cnt; i++) | |
638 | dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i, | |
639 | (unsigned long long) pq_get_src(descs, i), pq->coef[i]); | |
640 | dev_dbg(dev, "\tP: %#llx\n", pq->p_addr); | |
641 | dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr); | |
642 | } | |
643 | ||
644 | static struct dma_async_tx_descriptor * | |
645 | __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, | |
646 | const dma_addr_t *dst, const dma_addr_t *src, | |
647 | unsigned int src_cnt, const unsigned char *scf, | |
648 | size_t len, unsigned long flags) | |
649 | { | |
650 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); | |
651 | struct ioat_chan_common *chan = &ioat->base; | |
652 | struct ioat_ring_ent *compl_desc; | |
653 | struct ioat_ring_ent *desc; | |
654 | struct ioat_ring_ent *ext; | |
655 | size_t total_len = len; | |
656 | struct ioat_pq_descriptor *pq; | |
657 | struct ioat_pq_ext_descriptor *pq_ex = NULL; | |
658 | struct ioat_dma_descriptor *hw; | |
659 | u32 offset = 0; | |
660 | int num_descs; | |
661 | int with_ext; | |
662 | int i, s; | |
663 | u16 idx; | |
664 | u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ; | |
665 | ||
666 | dev_dbg(to_dev(chan), "%s\n", __func__); | |
667 | /* the engine requires at least two sources (we provide | |
668 | * at least 1 implied source in the DMA_PREP_CONTINUE case) | |
669 | */ | |
670 | BUG_ON(src_cnt + dmaf_continue(flags) < 2); | |
671 | ||
672 | num_descs = ioat2_xferlen_to_descs(ioat, len); | |
673 | /* we need 2x the number of descriptors to cover greater than 3 | |
cd78809f DW |
674 | * sources (we need 1 extra source in the q-only continuation |
675 | * case and 3 extra sources in the p+q continuation case. | |
d69d235b | 676 | */ |
cd78809f DW |
677 | if (src_cnt + dmaf_p_disabled_continue(flags) > 3 || |
678 | (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) { | |
d69d235b DW |
679 | with_ext = 1; |
680 | num_descs *= 2; | |
681 | } else | |
682 | with_ext = 0; | |
683 | ||
684 | /* completion writes from the raid engine may pass completion | |
685 | * writes from the legacy engine, so we need one extra null | |
686 | * (legacy) descriptor to ensure all completion writes arrive in | |
687 | * order. | |
688 | */ | |
689 | if (likely(num_descs) && | |
690 | ioat2_alloc_and_lock(&idx, ioat, num_descs+1) == 0) | |
691 | /* pass */; | |
692 | else | |
693 | return NULL; | |
cdef57db DW |
694 | i = 0; |
695 | do { | |
d69d235b DW |
696 | struct ioat_raw_descriptor *descs[2]; |
697 | size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log); | |
698 | ||
699 | desc = ioat2_get_ring_ent(ioat, idx + i); | |
700 | pq = desc->pq; | |
701 | ||
702 | /* save a branch by unconditionally retrieving the | |
703 | * extended descriptor pq_set_src() knows to not write | |
704 | * to it in the single descriptor case | |
705 | */ | |
706 | ext = ioat2_get_ring_ent(ioat, idx + i + with_ext); | |
707 | pq_ex = ext->pq_ex; | |
708 | ||
709 | descs[0] = (struct ioat_raw_descriptor *) pq; | |
710 | descs[1] = (struct ioat_raw_descriptor *) pq_ex; | |
711 | ||
712 | for (s = 0; s < src_cnt; s++) | |
713 | pq_set_src(descs, src[s], offset, scf[s], s); | |
714 | ||
715 | /* see the comment for dma_maxpq in include/linux/dmaengine.h */ | |
716 | if (dmaf_p_disabled_continue(flags)) | |
717 | pq_set_src(descs, dst[1], offset, 1, s++); | |
718 | else if (dmaf_continue(flags)) { | |
719 | pq_set_src(descs, dst[0], offset, 0, s++); | |
720 | pq_set_src(descs, dst[1], offset, 1, s++); | |
721 | pq_set_src(descs, dst[1], offset, 0, s++); | |
722 | } | |
723 | pq->size = xfer_size; | |
724 | pq->p_addr = dst[0] + offset; | |
725 | pq->q_addr = dst[1] + offset; | |
726 | pq->ctl = 0; | |
727 | pq->ctl_f.op = op; | |
728 | pq->ctl_f.src_cnt = src_cnt_to_hw(s); | |
729 | pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P); | |
730 | pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q); | |
731 | ||
732 | len -= xfer_size; | |
733 | offset += xfer_size; | |
cdef57db | 734 | } while ((i += 1 + with_ext) < num_descs); |
d69d235b DW |
735 | |
736 | /* last pq descriptor carries the unmap parameters and fence bit */ | |
737 | desc->txd.flags = flags; | |
738 | desc->len = total_len; | |
739 | if (result) | |
740 | desc->result = result; | |
741 | pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE); | |
742 | dump_pq_desc_dbg(ioat, desc, ext); | |
743 | ||
744 | /* completion descriptor carries interrupt bit */ | |
745 | compl_desc = ioat2_get_ring_ent(ioat, idx + i); | |
746 | compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT; | |
747 | hw = compl_desc->hw; | |
748 | hw->ctl = 0; | |
749 | hw->ctl_f.null = 1; | |
750 | hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); | |
751 | hw->ctl_f.compl_write = 1; | |
752 | hw->size = NULL_DESC_BUFFER_SIZE; | |
753 | dump_desc_dbg(ioat, compl_desc); | |
754 | ||
755 | /* we leave the channel locked to ensure in order submission */ | |
49954c15 | 756 | return &compl_desc->txd; |
d69d235b DW |
757 | } |
758 | ||
759 | static struct dma_async_tx_descriptor * | |
760 | ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, | |
761 | unsigned int src_cnt, const unsigned char *scf, size_t len, | |
762 | unsigned long flags) | |
763 | { | |
de581b65 DW |
764 | /* specify valid address for disabled result */ |
765 | if (flags & DMA_PREP_PQ_DISABLE_P) | |
766 | dst[0] = dst[1]; | |
767 | if (flags & DMA_PREP_PQ_DISABLE_Q) | |
768 | dst[1] = dst[0]; | |
769 | ||
d69d235b DW |
770 | /* handle the single source multiply case from the raid6 |
771 | * recovery path | |
772 | */ | |
de581b65 | 773 | if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) { |
d69d235b DW |
774 | dma_addr_t single_source[2]; |
775 | unsigned char single_source_coef[2]; | |
776 | ||
777 | BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q); | |
778 | single_source[0] = src[0]; | |
779 | single_source[1] = src[0]; | |
780 | single_source_coef[0] = scf[0]; | |
781 | single_source_coef[1] = 0; | |
782 | ||
783 | return __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2, | |
784 | single_source_coef, len, flags); | |
785 | } else | |
786 | return __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, scf, | |
787 | len, flags); | |
788 | } | |
789 | ||
790 | struct dma_async_tx_descriptor * | |
791 | ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, | |
792 | unsigned int src_cnt, const unsigned char *scf, size_t len, | |
793 | enum sum_check_flags *pqres, unsigned long flags) | |
794 | { | |
de581b65 DW |
795 | /* specify valid address for disabled result */ |
796 | if (flags & DMA_PREP_PQ_DISABLE_P) | |
797 | pq[0] = pq[1]; | |
798 | if (flags & DMA_PREP_PQ_DISABLE_Q) | |
799 | pq[1] = pq[0]; | |
800 | ||
d69d235b DW |
801 | /* the cleanup routine only sets bits on validate failure, it |
802 | * does not clear bits on validate success... so clear it here | |
803 | */ | |
804 | *pqres = 0; | |
805 | ||
806 | return __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len, | |
807 | flags); | |
808 | } | |
809 | ||
static struct dma_async_tx_descriptor *
ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		 unsigned int src_cnt, size_t len, unsigned long flags)
{
	unsigned char scf[src_cnt];
	dma_addr_t pq[2];

	memset(scf, 0, src_cnt);
	pq[0] = dst;
	flags |= DMA_PREP_PQ_DISABLE_Q;
	pq[1] = dst; /* specify valid address for disabled result */

	return __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
				    flags);
}

struct dma_async_tx_descriptor *
ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
		     unsigned int src_cnt, size_t len,
		     enum sum_check_flags *result, unsigned long flags)
{
	unsigned char scf[src_cnt];
	dma_addr_t pq[2];

	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*result = 0;

	memset(scf, 0, src_cnt);
	pq[0] = src[0];
	flags |= DMA_PREP_PQ_DISABLE_Q;
	pq[1] = pq[0]; /* specify valid address for disabled result */

	return __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, scf,
				    len, flags);
}

static struct dma_async_tx_descriptor *
ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;
	u16 idx;

	if (ioat2_alloc_and_lock(&idx, ioat, 1) == 0)
		desc = ioat2_get_ring_ent(ioat, idx);
	else
		return NULL;

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	hw->ctl_f.compl_write = 1;
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;

	desc->txd.flags = flags;
	desc->len = 1;

	dump_desc_dbg(ioat, desc);

	/* we leave the channel locked to ensure in order submission */
	return &desc->txd;
}

static void __devinit ioat3_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}

#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[IOAT_NUM_SRC_TEST];
	struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dma_addr, dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	u32 xor_val_result;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;

	dev_dbg(dev, "%s\n", __func__);

	if (!dma_has_cap(DMA_XOR, dma->cap_mask))
		return 0;

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
			(cmp_byte << 8) | cmp_byte;
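	/*
	 * Each source page is filled with (1 << src_idx), so with
	 * IOAT_NUM_SRC_TEST == 6 sources the xor of every byte position is
	 * 0x3f; cmp_word is that byte replicated across a 32-bit word.
	 */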
941 | ||
942 | memset(page_address(dest), 0, PAGE_SIZE); | |
943 | ||
944 | dma_chan = container_of(dma->channels.next, struct dma_chan, | |
945 | device_node); | |
946 | if (dma->device_alloc_chan_resources(dma_chan) < 1) { | |
947 | err = -ENODEV; | |
948 | goto out; | |
949 | } | |
950 | ||
951 | /* test xor */ | |
952 | dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE); | |
953 | for (i = 0; i < IOAT_NUM_SRC_TEST; i++) | |
954 | dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE, | |
955 | DMA_TO_DEVICE); | |
956 | tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs, | |
957 | IOAT_NUM_SRC_TEST, PAGE_SIZE, | |
958 | DMA_PREP_INTERRUPT); | |
959 | ||
960 | if (!tx) { | |
961 | dev_err(dev, "Self-test xor prep failed\n"); | |
962 | err = -ENODEV; | |
963 | goto free_resources; | |
964 | } | |
965 | ||
966 | async_tx_ack(tx); | |
967 | init_completion(&cmp); | |
968 | tx->callback = ioat3_dma_test_callback; | |
969 | tx->callback_param = &cmp; | |
970 | cookie = tx->tx_submit(tx); | |
971 | if (cookie < 0) { | |
972 | dev_err(dev, "Self-test xor setup failed\n"); | |
973 | err = -ENODEV; | |
974 | goto free_resources; | |
975 | } | |
976 | dma->device_issue_pending(dma_chan); | |
977 | ||
978 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); | |
979 | ||
980 | if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { | |
981 | dev_err(dev, "Self-test xor timed out\n"); | |
982 | err = -ENODEV; | |
983 | goto free_resources; | |
984 | } | |
985 | ||
986 | dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); | |
987 | for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { | |
988 | u32 *ptr = page_address(dest); | |
989 | if (ptr[i] != cmp_word) { | |
990 | dev_err(dev, "Self-test xor failed compare\n"); | |
991 | err = -ENODEV; | |
992 | goto free_resources; | |
993 | } | |
994 | } | |
995 | dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_TO_DEVICE); | |
996 | ||
997 | /* skip validate if the capability is not present */ | |
998 | if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask)) | |
999 | goto free_resources; | |
1000 | ||
1001 | /* validate the sources with the destintation page */ | |
1002 | for (i = 0; i < IOAT_NUM_SRC_TEST; i++) | |
1003 | xor_val_srcs[i] = xor_srcs[i]; | |
1004 | xor_val_srcs[i] = dest; | |
1005 | ||
1006 | xor_val_result = 1; | |
1007 | ||
1008 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) | |
1009 | dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, | |
1010 | DMA_TO_DEVICE); | |
1011 | tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, | |
1012 | IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, | |
1013 | &xor_val_result, DMA_PREP_INTERRUPT); | |
1014 | if (!tx) { | |
1015 | dev_err(dev, "Self-test zero prep failed\n"); | |
1016 | err = -ENODEV; | |
1017 | goto free_resources; | |
1018 | } | |
1019 | ||
1020 | async_tx_ack(tx); | |
1021 | init_completion(&cmp); | |
1022 | tx->callback = ioat3_dma_test_callback; | |
1023 | tx->callback_param = &cmp; | |
1024 | cookie = tx->tx_submit(tx); | |
1025 | if (cookie < 0) { | |
1026 | dev_err(dev, "Self-test zero setup failed\n"); | |
1027 | err = -ENODEV; | |
1028 | goto free_resources; | |
1029 | } | |
1030 | dma->device_issue_pending(dma_chan); | |
1031 | ||
1032 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); | |
1033 | ||
1034 | if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { | |
1035 | dev_err(dev, "Self-test validate timed out\n"); | |
1036 | err = -ENODEV; | |
1037 | goto free_resources; | |
1038 | } | |
1039 | ||
1040 | if (xor_val_result != 0) { | |
1041 | dev_err(dev, "Self-test validate failed compare\n"); | |
1042 | err = -ENODEV; | |
1043 | goto free_resources; | |
1044 | } | |
1045 | ||
1046 | /* skip memset if the capability is not present */ | |
1047 | if (!dma_has_cap(DMA_MEMSET, dma_chan->device->cap_mask)) | |
1048 | goto free_resources; | |
1049 | ||
1050 | /* test memset */ | |
1051 | dma_addr = dma_map_page(dev, dest, 0, | |
1052 | PAGE_SIZE, DMA_FROM_DEVICE); | |
1053 | tx = dma->device_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE, | |
1054 | DMA_PREP_INTERRUPT); | |
1055 | if (!tx) { | |
1056 | dev_err(dev, "Self-test memset prep failed\n"); | |
1057 | err = -ENODEV; | |
1058 | goto free_resources; | |
1059 | } | |
1060 | ||
1061 | async_tx_ack(tx); | |
1062 | init_completion(&cmp); | |
1063 | tx->callback = ioat3_dma_test_callback; | |
1064 | tx->callback_param = &cmp; | |
1065 | cookie = tx->tx_submit(tx); | |
1066 | if (cookie < 0) { | |
1067 | dev_err(dev, "Self-test memset setup failed\n"); | |
1068 | err = -ENODEV; | |
1069 | goto free_resources; | |
1070 | } | |
1071 | dma->device_issue_pending(dma_chan); | |
1072 | ||
1073 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); | |
1074 | ||
1075 | if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { | |
1076 | dev_err(dev, "Self-test memset timed out\n"); | |
1077 | err = -ENODEV; | |
1078 | goto free_resources; | |
1079 | } | |
1080 | ||
1081 | for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) { | |
1082 | u32 *ptr = page_address(dest); | |
1083 | if (ptr[i]) { | |
1084 | dev_err(dev, "Self-test memset failed compare\n"); | |
1085 | err = -ENODEV; | |
1086 | goto free_resources; | |
1087 | } | |
1088 | } | |
1089 | ||
1090 | /* test for non-zero parity sum */ | |
1091 | xor_val_result = 0; | |
1092 | for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) | |
1093 | dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, | |
1094 | DMA_TO_DEVICE); | |
1095 | tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, | |
1096 | IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, | |
1097 | &xor_val_result, DMA_PREP_INTERRUPT); | |
1098 | if (!tx) { | |
1099 | dev_err(dev, "Self-test 2nd zero prep failed\n"); | |
1100 | err = -ENODEV; | |
1101 | goto free_resources; | |
1102 | } | |
1103 | ||
1104 | async_tx_ack(tx); | |
1105 | init_completion(&cmp); | |
1106 | tx->callback = ioat3_dma_test_callback; | |
1107 | tx->callback_param = &cmp; | |
1108 | cookie = tx->tx_submit(tx); | |
1109 | if (cookie < 0) { | |
1110 | dev_err(dev, "Self-test 2nd zero setup failed\n"); | |
1111 | err = -ENODEV; | |
1112 | goto free_resources; | |
1113 | } | |
1114 | dma->device_issue_pending(dma_chan); | |
1115 | ||
1116 | tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); | |
1117 | ||
1118 | if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { | |
1119 | dev_err(dev, "Self-test 2nd validate timed out\n"); | |
1120 | err = -ENODEV; | |
1121 | goto free_resources; | |
1122 | } | |
1123 | ||
1124 | if (xor_val_result != SUM_CHECK_P_RESULT) { | |
1125 | dev_err(dev, "Self-test validate failed compare\n"); | |
1126 | err = -ENODEV; | |
1127 | goto free_resources; | |
1128 | } | |
1129 | ||
1130 | free_resources: | |
1131 | dma->device_free_chan_resources(dma_chan); | |
1132 | out: | |
1133 | src_idx = IOAT_NUM_SRC_TEST; | |
1134 | while (src_idx--) | |
1135 | __free_page(xor_srcs[src_idx]); | |
1136 | __free_page(dest); | |
1137 | return err; | |
1138 | } | |
1139 | ||
1140 | static int __devinit ioat3_dma_self_test(struct ioatdma_device *device) | |
1141 | { | |
1142 | int rc = ioat_dma_self_test(device); | |
1143 | ||
1144 | if (rc) | |
1145 | return rc; | |
1146 | ||
1147 | rc = ioat_xor_val_self_test(device); | |
1148 | if (rc) | |
1149 | return rc; | |
1150 | ||
1151 | return 0; | |
1152 | } | |
1153 | ||
a6d52d70 DW |
1154 | static int ioat3_reset_hw(struct ioat_chan_common *chan) |
1155 | { | |
1156 | /* throw away whatever the channel was doing and get it | |
1157 | * initialized, with ioat3 specific workarounds | |
1158 | */ | |
1159 | struct ioatdma_device *device = chan->device; | |
1160 | struct pci_dev *pdev = device->pdev; | |
1161 | u32 chanerr; | |
1162 | u16 dev_id; | |
1163 | int err; | |
1164 | ||
1165 | ioat2_quiesce(chan, msecs_to_jiffies(100)); | |
1166 | ||
1167 | chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); | |
1168 | writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); | |
1169 | ||
1170 | /* -= IOAT ver.3 workarounds =- */ | |
1171 | /* Write CHANERRMSK_INT with 3E07h to mask out the errors | |
1172 | * that can cause stability issues for IOAT ver.3, and clear any | |
1173 | * pending errors | |
1174 | */ | |
1175 | pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07); | |
1176 | err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr); | |
1177 | if (err) { | |
1178 | dev_err(&pdev->dev, "channel error register unreachable\n"); | |
1179 | return err; | |
1180 | } | |
1181 | pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr); | |
1182 | ||
1183 | /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit | |
1184 | * (workaround for spurious config parity error after restart) | |
1185 | */ | |
1186 | pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); | |
1187 | if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) | |
1188 | pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10); | |
1189 | ||
1190 | return ioat2_reset_sync(chan, msecs_to_jiffies(200)); | |
1191 | } | |
1192 | ||
bf40a686 DW |
1193 | int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) |
1194 | { | |
1195 | struct pci_dev *pdev = device->pdev; | |
228c4f5c | 1196 | int dca_en = system_has_dca_enabled(pdev); |
bf40a686 DW |
1197 | struct dma_device *dma; |
1198 | struct dma_chan *c; | |
1199 | struct ioat_chan_common *chan; | |
e3232714 | 1200 | bool is_raid_device = false; |
bf40a686 | 1201 | int err; |
bf40a686 DW |
1202 | u32 cap; |
1203 | ||
1204 | device->enumerate_channels = ioat2_enumerate_channels; | |
a6d52d70 | 1205 | device->reset_hw = ioat3_reset_hw; |
9de6fc71 | 1206 | device->self_test = ioat3_dma_self_test; |
bf40a686 DW |
1207 | dma = &device->common; |
1208 | dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock; | |
1209 | dma->device_issue_pending = ioat2_issue_pending; | |
1210 | dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; | |
1211 | dma->device_free_chan_resources = ioat2_free_chan_resources; | |
58c8649e DW |
1212 | |
1213 | dma_cap_set(DMA_INTERRUPT, dma->cap_mask); | |
1214 | dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock; | |
1215 | ||
bf40a686 | 1216 | cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET); |
228c4f5c DW |
1217 | |
1218 | /* dca is incompatible with raid operations */ | |
1219 | if (dca_en && (cap & (IOAT_CAP_XOR|IOAT_CAP_PQ))) | |
1220 | cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ); | |
1221 | ||
b094ad3b | 1222 | if (cap & IOAT_CAP_XOR) { |
e3232714 | 1223 | is_raid_device = true; |
b094ad3b DW |
1224 | dma->max_xor = 8; |
1225 | dma->xor_align = 2; | |
1226 | ||
1227 | dma_cap_set(DMA_XOR, dma->cap_mask); | |
1228 | dma->device_prep_dma_xor = ioat3_prep_xor; | |
1229 | ||
1230 | dma_cap_set(DMA_XOR_VAL, dma->cap_mask); | |
1231 | dma->device_prep_dma_xor_val = ioat3_prep_xor_val; | |
1232 | } | |
d69d235b | 1233 | if (cap & IOAT_CAP_PQ) { |
e3232714 | 1234 | is_raid_device = true; |
d69d235b DW |
1235 | dma_set_maxpq(dma, 8, 0); |
1236 | dma->pq_align = 2; | |
1237 | ||
1238 | dma_cap_set(DMA_PQ, dma->cap_mask); | |
1239 | dma->device_prep_dma_pq = ioat3_prep_pq; | |
1240 | ||
1241 | dma_cap_set(DMA_PQ_VAL, dma->cap_mask); | |
1242 | dma->device_prep_dma_pq_val = ioat3_prep_pq_val; | |
ae786624 DW |
1243 | |
1244 | if (!(cap & IOAT_CAP_XOR)) { | |
1245 | dma->max_xor = 8; | |
1246 | dma->xor_align = 2; | |
1247 | ||
1248 | dma_cap_set(DMA_XOR, dma->cap_mask); | |
1249 | dma->device_prep_dma_xor = ioat3_prep_pqxor; | |
1250 | ||
1251 | dma_cap_set(DMA_XOR_VAL, dma->cap_mask); | |
1252 | dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val; | |
1253 | } | |
d69d235b | 1254 | } |
e3232714 DW |
1255 | if (is_raid_device && (cap & IOAT_CAP_FILL_BLOCK)) { |
1256 | dma_cap_set(DMA_MEMSET, dma->cap_mask); | |
1257 | dma->device_prep_dma_memset = ioat3_prep_memset_lock; | |
1258 | } | |
1259 | ||
1260 | ||
1261 | if (is_raid_device) { | |
1262 | dma->device_is_tx_complete = ioat3_is_complete; | |
aa4d72ae | 1263 | device->cleanup_fn = ioat3_cleanup_event; |
e3232714 DW |
1264 | device->timer_fn = ioat3_timer_event; |
1265 | } else { | |
aa4d72ae DW |
1266 | dma->device_is_tx_complete = ioat_is_dma_complete; |
1267 | device->cleanup_fn = ioat2_cleanup_event; | |
e3232714 DW |
1268 | device->timer_fn = ioat2_timer_event; |
1269 | } | |
bf40a686 | 1270 | |
7b3cc2b1 DW |
1271 | #ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA |
1272 | dma_cap_clear(DMA_PQ_VAL, dma->cap_mask); | |
1273 | dma->device_prep_dma_pq_val = NULL; | |
1274 | #endif | |
1275 | ||
1276 | #ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA | |
1277 | dma_cap_clear(DMA_XOR_VAL, dma->cap_mask); | |
1278 | dma->device_prep_dma_xor_val = NULL; | |
1279 | #endif | |
1280 | ||
bf40a686 DW |
1281 | err = ioat_probe(device); |
1282 | if (err) | |
1283 | return err; | |
1284 | ioat_set_tcp_copy_break(262144); | |
1285 | ||
1286 | list_for_each_entry(c, &dma->channels, device_node) { | |
1287 | chan = to_chan_common(c); | |
1288 | writel(IOAT_DMA_DCA_ANY_CPU, | |
1289 | chan->reg_base + IOAT_DCACTRL_OFFSET); | |
1290 | } | |
1291 | ||
1292 | err = ioat_register(device); | |
1293 | if (err) | |
1294 | return err; | |
5669e31c DW |
1295 | |
1296 | ioat_kobject_add(device, &ioat2_ktype); | |
1297 | ||
bf40a686 DW |
1298 | if (dca) |
1299 | device->dca = ioat3_dca_init(pdev, device->reg_base); | |
1300 | ||
1301 | return 0; | |
1302 | } |