/*
 * Copyright (C) 2006-2009 DENX Software Engineering.
 *
 * Author: Yuri Tikhonov <yur@emcraft.com>
 *
 * Further porting to arch/powerpc by
 *	Anatolij Gustschin <agust@denx.de>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This driver supports the asynchronous DMA copy and RAID engines available
 * on the AMCC PPC440SPe processors.
 * Based on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
 * ADMA driver written by D. Williams.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/async_tx.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include "adma.h"

enum ppc_adma_init_code {
	PPC_ADMA_INIT_OK = 0,
	PPC_ADMA_INIT_MEMRES,
	PPC_ADMA_INIT_MEMREG,
	PPC_ADMA_INIT_ALLOC,
	PPC_ADMA_INIT_COHERENT,
	PPC_ADMA_INIT_CHANNEL,
	PPC_ADMA_INIT_IRQ1,
	PPC_ADMA_INIT_IRQ2,
	PPC_ADMA_INIT_REGISTER
};

static char *ppc_adma_errors[] = {
	[PPC_ADMA_INIT_OK] = "ok",
	[PPC_ADMA_INIT_MEMRES] = "failed to get memory resource",
	[PPC_ADMA_INIT_MEMREG] = "failed to request memory region",
	[PPC_ADMA_INIT_ALLOC] = "failed to allocate memory for adev "
				"structure",
	[PPC_ADMA_INIT_COHERENT] = "failed to allocate coherent memory for "
				   "hardware descriptors",
	[PPC_ADMA_INIT_CHANNEL] = "failed to allocate memory for channel",
	[PPC_ADMA_INIT_IRQ1] = "failed to request first irq",
	[PPC_ADMA_INIT_IRQ2] = "failed to request second irq",
	[PPC_ADMA_INIT_REGISTER] = "failed to register dma async device",
};

static enum ppc_adma_init_code
		ppc440spe_adma_devices[PPC440SPE_ADMA_ENGINES_NUM];

struct ppc_dma_chan_ref {
	struct dma_chan *chan;
	struct list_head node;
};

/* The list of channels exported by ppc440spe ADMA */
struct list_head
ppc440spe_adma_chan_list = LIST_HEAD_INIT(ppc440spe_adma_chan_list);

/* This flag is set when we want to refetch the xor chain in the
 * interrupt handler
 */
static u32 do_xor_refetch;

/* Pointer to DMA0, DMA1 CP/CS FIFO */
static void *ppc440spe_dma_fifo_buf;

/* Pointers to the last CDBs submitted to DMA0, DMA1 */
static struct ppc440spe_adma_desc_slot *chan_last_sub[3];
static struct ppc440spe_adma_desc_slot *chan_first_cdb[3];

/* Pointer to the last linked and submitted xor CB */
static struct ppc440spe_adma_desc_slot *xor_last_linked;
static struct ppc440spe_adma_desc_slot *xor_last_submit;

/* This array is used in data-check operations for storing a pattern */
static char ppc440spe_qword[16];

static atomic_t ppc440spe_adma_err_irq_ref;
static dcr_host_t ppc440spe_mq_dcr_host;
static unsigned int ppc440spe_mq_dcr_len;

/* Since RXOR operations use a common register (MQ0_CF2H) to set up the
 * block size for transactions, we do not allow more than one RXOR
 * transaction to be active at a time. This variable tracks whether RXOR
 * is currently active (the PPC440SPE_RXOR_RUN bit is set) or not
 * (PPC440SPE_RXOR_RUN is clear).
 */
static unsigned long ppc440spe_rxor_state;

/* These are used in enable & check routines
 */
static u32 ppc440spe_r6_enabled;
static struct ppc440spe_adma_chan *ppc440spe_r6_tchan;
static struct completion ppc440spe_r6_test_comp;

static int ppc440spe_adma_dma2rxor_prep_src(
		struct ppc440spe_adma_desc_slot *desc,
		struct ppc440spe_rxor *cursor, int index,
		int src_cnt, u32 addr);
static void ppc440spe_adma_dma2rxor_set_src(
		struct ppc440spe_adma_desc_slot *desc,
		int index, dma_addr_t addr);
static void ppc440spe_adma_dma2rxor_set_mult(
		struct ppc440spe_adma_desc_slot *desc,
		int index, u8 mult);

#ifdef ADMA_LL_DEBUG
#define ADMA_LL_DBG(x) ({ if (1) x; 0; })
#else
#define ADMA_LL_DBG(x) ({ if (0) x; 0; })
#endif
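
/*
 * Note: ADMA_LL_DBG() is a GCC statement expression. With ADMA_LL_DEBUG
 * defined the argument is executed; without it the 'if (0)' arm still
 * lets the compiler type-check the debug call while the dead code is
 * optimized away, so debug-only calls such as print_cb() below cannot
 * bit-rot.
 */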

static void print_cb(struct ppc440spe_adma_chan *chan, void *block)
{
	struct dma_cdb *cdb;
	struct xor_cb *cb;
	int i;

	switch (chan->device->id) {
	case 0:
	case 1:
		cdb = block;

		pr_debug("CDB at %p [%d]:\n"
			"\t attr 0x%02x opc 0x%02x cnt 0x%08x\n"
			"\t sg1u 0x%08x sg1l 0x%08x\n"
			"\t sg2u 0x%08x sg2l 0x%08x\n"
			"\t sg3u 0x%08x sg3l 0x%08x\n",
			cdb, chan->device->id,
			cdb->attr, cdb->opc, le32_to_cpu(cdb->cnt),
			le32_to_cpu(cdb->sg1u), le32_to_cpu(cdb->sg1l),
			le32_to_cpu(cdb->sg2u), le32_to_cpu(cdb->sg2l),
			le32_to_cpu(cdb->sg3u), le32_to_cpu(cdb->sg3l)
		);
		break;
	case 2:
		cb = block;

		pr_debug("CB at %p [%d]:\n"
			"\t cbc 0x%08x cbbc 0x%08x cbs 0x%08x\n"
			"\t cbtah 0x%08x cbtal 0x%08x\n"
			"\t cblah 0x%08x cblal 0x%08x\n",
			cb, chan->device->id,
			cb->cbc, cb->cbbc, cb->cbs,
			cb->cbtah, cb->cbtal,
			cb->cblah, cb->cblal);
		for (i = 0; i < 16; i++) {
			if (i && !cb->ops[i].h && !cb->ops[i].l)
				continue;
			pr_debug("\t ops[%2d]: h 0x%08x l 0x%08x\n",
				i, cb->ops[i].h, cb->ops[i].l);
		}
		break;
	}
}

static void print_cb_list(struct ppc440spe_adma_chan *chan,
			  struct ppc440spe_adma_desc_slot *iter)
{
	for (; iter; iter = iter->hw_next)
		print_cb(chan, iter->hw_desc);
}

static void prep_dma_xor_dbg(int id, dma_addr_t dst, dma_addr_t *src,
			     unsigned int src_cnt)
{
	int i;

	pr_debug("\n%s(%d):\nsrc: ", __func__, id);
	for (i = 0; i < src_cnt; i++)
		pr_debug("\t0x%016llx ", src[i]);
	pr_debug("dst:\n\t0x%016llx\n", dst);
}

static void prep_dma_pq_dbg(int id, dma_addr_t *dst, dma_addr_t *src,
			    unsigned int src_cnt)
{
	int i;

	pr_debug("\n%s(%d):\nsrc: ", __func__, id);
	for (i = 0; i < src_cnt; i++)
		pr_debug("\t0x%016llx ", src[i]);
	pr_debug("dst: ");
	for (i = 0; i < 2; i++)
		pr_debug("\t0x%016llx ", dst[i]);
}

static void prep_dma_pqzero_sum_dbg(int id, dma_addr_t *src,
				    unsigned int src_cnt,
				    const unsigned char *scf)
{
	int i;

	pr_debug("\n%s(%d):\nsrc(coef): ", __func__, id);
	if (scf) {
		for (i = 0; i < src_cnt; i++)
			pr_debug("\t0x%016llx(0x%02x) ", src[i], scf[i]);
	} else {
		for (i = 0; i < src_cnt; i++)
			pr_debug("\t0x%016llx(no) ", src[i]);
	}

	pr_debug("dst: ");
	for (i = 0; i < 2; i++)
		pr_debug("\t0x%016llx ", src[src_cnt + i]);
}

/******************************************************************************
 * Command (Descriptor) Blocks low-level routines
 ******************************************************************************/
/**
 * ppc440spe_desc_init_interrupt - initialize the descriptor for INTERRUPT
 * pseudo operation
 */
static void ppc440spe_desc_init_interrupt(struct ppc440spe_adma_desc_slot *desc,
					  struct ppc440spe_adma_chan *chan)
{
	struct xor_cb *p;

	switch (chan->device->id) {
	case PPC440SPE_XOR_ID:
		p = desc->hw_desc;
		memset(desc->hw_desc, 0, sizeof(struct xor_cb));
		/* NOP with Command Block Complete Enable */
		p->cbc = XOR_CBCR_CBCE_BIT;
		break;
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
		/* NOP with interrupt */
		set_bit(PPC440SPE_DESC_INT, &desc->flags);
		break;
	default:
		printk(KERN_ERR "Unsupported id %d in %s\n", chan->device->id,
				__func__);
		break;
	}
}

/**
 * ppc440spe_desc_init_null_xor - initialize the descriptor for NULL XOR
 * pseudo operation
 */
static void ppc440spe_desc_init_null_xor(struct ppc440spe_adma_desc_slot *desc)
{
	memset(desc->hw_desc, 0, sizeof(struct xor_cb));
	desc->hw_next = NULL;
	desc->src_cnt = 0;
	desc->dst_cnt = 1;
}

/**
 * ppc440spe_desc_init_xor - initialize the descriptor for XOR operation
 */
static void ppc440spe_desc_init_xor(struct ppc440spe_adma_desc_slot *desc,
				    int src_cnt, unsigned long flags)
{
	struct xor_cb *hw_desc = desc->hw_desc;

	memset(desc->hw_desc, 0, sizeof(struct xor_cb));
	desc->hw_next = NULL;
	desc->src_cnt = src_cnt;
	desc->dst_cnt = 1;

	hw_desc->cbc = XOR_CBCR_TGT_BIT | src_cnt;
	if (flags & DMA_PREP_INTERRUPT)
		/* Enable interrupt on completion */
		hw_desc->cbc |= XOR_CBCR_CBCE_BIT;
}

/**
 * ppc440spe_desc_init_dma2pq - initialize the descriptor for PQ
 * operation in DMA2 controller
 */
static void ppc440spe_desc_init_dma2pq(struct ppc440spe_adma_desc_slot *desc,
					int dst_cnt, int src_cnt,
					unsigned long flags)
{
	struct xor_cb *hw_desc = desc->hw_desc;

	memset(desc->hw_desc, 0, sizeof(struct xor_cb));
	desc->hw_next = NULL;
	desc->src_cnt = src_cnt;
	desc->dst_cnt = dst_cnt;
	memset(desc->reverse_flags, 0, sizeof(desc->reverse_flags));
	desc->descs_per_op = 0;

	hw_desc->cbc = XOR_CBCR_TGT_BIT;
	if (flags & DMA_PREP_INTERRUPT)
		/* Enable interrupt on completion */
		hw_desc->cbc |= XOR_CBCR_CBCE_BIT;
}

#define DMA_CTRL_FLAGS_LAST	DMA_PREP_FENCE
#define DMA_PREP_ZERO_P		(DMA_CTRL_FLAGS_LAST << 1)
#define DMA_PREP_ZERO_Q		(DMA_PREP_ZERO_P << 1)
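
/*
 * DMA_PREP_ZERO_P/Q are driver-private prep flags. They are allocated
 * directly above the last generic dmaengine prep flag (DMA_PREP_FENCE
 * at the time of writing), so they cannot collide with flags passed in
 * by the async_tx core.
 */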

/**
 * ppc440spe_desc_init_dma01pq - initialize the descriptors for PQ operation
 * with DMA0/1
 */
static void ppc440spe_desc_init_dma01pq(struct ppc440spe_adma_desc_slot *desc,
				int dst_cnt, int src_cnt, unsigned long flags,
				unsigned long op)
{
	struct dma_cdb *hw_desc;
	struct ppc440spe_adma_desc_slot *iter;
	u8 dopc;

	/* Common initialization of a PQ descriptor chain */
	set_bits(op, &desc->flags);
	desc->src_cnt = src_cnt;
	desc->dst_cnt = dst_cnt;

	/* WXOR MULTICAST if both P and Q are being computed,
	 * MV_SG1_SG2 if Q only
	 */
	dopc = (desc->dst_cnt == DMA_DEST_MAX_NUM) ?
		DMA_CDB_OPC_MULTICAST : DMA_CDB_OPC_MV_SG1_SG2;

	list_for_each_entry(iter, &desc->group_list, chain_node) {
		hw_desc = iter->hw_desc;
		memset(iter->hw_desc, 0, sizeof(struct dma_cdb));

		if (likely(!list_is_last(&iter->chain_node,
				&desc->group_list))) {
			/* set 'next' pointer */
			iter->hw_next = list_entry(iter->chain_node.next,
				struct ppc440spe_adma_desc_slot, chain_node);
			clear_bit(PPC440SPE_DESC_INT, &iter->flags);
		} else {
			/* this is the last descriptor.
			 * this slot will be pasted from the ADMA level
			 * each time it wants to configure parameters
			 * of the transaction (src, dst, ...)
			 */
			iter->hw_next = NULL;
			if (flags & DMA_PREP_INTERRUPT)
				set_bit(PPC440SPE_DESC_INT, &iter->flags);
			else
				clear_bit(PPC440SPE_DESC_INT, &iter->flags);
		}
	}

	/* Set OPS depending on WXOR/RXOR type of operation */
	if (!test_bit(PPC440SPE_DESC_RXOR, &desc->flags)) {
		/* This is a WXOR only chain:
		 * - the first descriptors are for zeroing destinations
		 *   if PPC440SPE_ZERO_P/Q is set;
		 * - the remaining descriptors are for GF-XOR operations.
		 */
		iter = list_first_entry(&desc->group_list,
					struct ppc440spe_adma_desc_slot,
					chain_node);

		if (test_bit(PPC440SPE_ZERO_P, &desc->flags)) {
			hw_desc = iter->hw_desc;
			hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
			iter = list_first_entry(&iter->chain_node,
					struct ppc440spe_adma_desc_slot,
					chain_node);
		}

		if (test_bit(PPC440SPE_ZERO_Q, &desc->flags)) {
			hw_desc = iter->hw_desc;
			hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
			iter = list_first_entry(&iter->chain_node,
					struct ppc440spe_adma_desc_slot,
					chain_node);
		}

		list_for_each_entry_from(iter, &desc->group_list, chain_node) {
			hw_desc = iter->hw_desc;
			hw_desc->opc = dopc;
		}
	} else {
		/* This is either RXOR-only or mixed RXOR/WXOR */

		/* The first 1 or 2 slots in the chain are always RXOR:
		 * if we need to calculate both P and Q, then there are two
		 * RXOR slots; if only P or only Q, then there is one
		 */
		iter = list_first_entry(&desc->group_list,
					struct ppc440spe_adma_desc_slot,
					chain_node);
		hw_desc = iter->hw_desc;
		hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;

		if (desc->dst_cnt == DMA_DEST_MAX_NUM) {
			iter = list_first_entry(&iter->chain_node,
						struct ppc440spe_adma_desc_slot,
						chain_node);
			hw_desc = iter->hw_desc;
			hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
		}

		/* The remaining descs (if any) are WXORs */
		if (test_bit(PPC440SPE_DESC_WXOR, &desc->flags)) {
			iter = list_first_entry(&iter->chain_node,
						struct ppc440spe_adma_desc_slot,
						chain_node);
			list_for_each_entry_from(iter, &desc->group_list,
						chain_node) {
				hw_desc = iter->hw_desc;
				hw_desc->opc = dopc;
			}
		}
	}
}
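
/*
 * Resulting CDB chain layouts (illustrative sketch, not additional
 * logic): for a WXOR-only chain with both P and Q zeroed,
 *   [MV_SG1_SG2 zero P] -> [MV_SG1_SG2 zero Q] -> src_cnt x [MULTICAST];
 * for a mixed RXOR/WXOR chain computing both P and Q,
 *   [RXOR P] -> [RXOR Q] -> remaining WXOR descriptors.
 */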

/**
 * ppc440spe_desc_init_dma01pqzero_sum - initialize the descriptor
 * for PQ_ZERO_SUM operation
 */
static void ppc440spe_desc_init_dma01pqzero_sum(
				struct ppc440spe_adma_desc_slot *desc,
				int dst_cnt, int src_cnt)
{
	struct dma_cdb *hw_desc;
	struct ppc440spe_adma_desc_slot *iter;
	int i = 0;
	u8 dopc = (dst_cnt == 2) ? DMA_CDB_OPC_MULTICAST :
				   DMA_CDB_OPC_MV_SG1_SG2;
	/*
	 * Initialize starting from the 2nd or 3rd descriptor, depending
	 * on dst_cnt. The first one or two slots are for cloning P
	 * and/or Q to chan->pdest and/or chan->qdest as we have
	 * to preserve the original P/Q.
	 */
	iter = list_first_entry(&desc->group_list,
				struct ppc440spe_adma_desc_slot, chain_node);
	iter = list_entry(iter->chain_node.next,
			  struct ppc440spe_adma_desc_slot, chain_node);

	if (dst_cnt > 1) {
		iter = list_entry(iter->chain_node.next,
				  struct ppc440spe_adma_desc_slot, chain_node);
	}
	/* initialize each source descriptor in the chain */
	list_for_each_entry_from(iter, &desc->group_list, chain_node) {
		hw_desc = iter->hw_desc;
		memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
		iter->src_cnt = 0;
		iter->dst_cnt = 0;

		/* This is a ZERO_SUM operation:
		 * - <src_cnt> descriptors starting from the 2nd or 3rd
		 *   descriptor are for GF-XOR operations;
		 * - the remaining <dst_cnt> descriptors are for checking
		 *   the result
		 */
		if (i++ < src_cnt)
			/* MV_SG1_SG2 if only Q is being verified,
			 * MULTICAST if both P and Q are being verified
			 */
			hw_desc->opc = dopc;
		else
			/* DMA_CDB_OPC_DCHECK128 operation */
			hw_desc->opc = DMA_CDB_OPC_DCHECK128;

		if (likely(!list_is_last(&iter->chain_node,
					 &desc->group_list))) {
			/* set 'next' pointer */
			iter->hw_next = list_entry(iter->chain_node.next,
						struct ppc440spe_adma_desc_slot,
						chain_node);
		} else {
			/* this is the last descriptor.
			 * this slot will be pasted from the ADMA level
			 * each time it wants to configure parameters
			 * of the transaction (src, dst, ...)
			 */
			iter->hw_next = NULL;
			/* always enable interrupt generation since we get
			 * the status of pqzero from the handler
			 */
			set_bit(PPC440SPE_DESC_INT, &iter->flags);
		}
	}
	desc->src_cnt = src_cnt;
	desc->dst_cnt = dst_cnt;
}
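
/*
 * Illustrative chain layout for PQ_ZERO_SUM with dst_cnt == 2:
 *   [clone P] -> [clone Q] -> src_cnt x [GF-XOR] ->
 *   [DCHECK128 P] -> [DCHECK128 Q]
 * where the first two slots preserve the original P/Q in chan->pdest
 * and chan->qdest.
 */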

/**
 * ppc440spe_desc_init_memcpy - initialize the descriptor for MEMCPY operation
 */
static void ppc440spe_desc_init_memcpy(struct ppc440spe_adma_desc_slot *desc,
				       unsigned long flags)
{
	struct dma_cdb *hw_desc = desc->hw_desc;

	memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
	desc->hw_next = NULL;
	desc->src_cnt = 1;
	desc->dst_cnt = 1;

	if (flags & DMA_PREP_INTERRUPT)
		set_bit(PPC440SPE_DESC_INT, &desc->flags);
	else
		clear_bit(PPC440SPE_DESC_INT, &desc->flags);

	hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
}

/**
 * ppc440spe_desc_init_memset - initialize the descriptor for MEMSET operation
 */
static void ppc440spe_desc_init_memset(struct ppc440spe_adma_desc_slot *desc,
				       int value, unsigned long flags)
{
	struct dma_cdb *hw_desc = desc->hw_desc;

	memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
	desc->hw_next = NULL;
	desc->src_cnt = 1;
	desc->dst_cnt = 1;

	if (flags & DMA_PREP_INTERRUPT)
		set_bit(PPC440SPE_DESC_INT, &desc->flags);
	else
		clear_bit(PPC440SPE_DESC_INT, &desc->flags);

	hw_desc->sg1u = hw_desc->sg1l = cpu_to_le32((u32)value);
	hw_desc->sg3u = hw_desc->sg3l = cpu_to_le32((u32)value);
	hw_desc->opc = DMA_CDB_OPC_DFILL128;
}

/**
 * ppc440spe_desc_set_src_addr - set source address into the descriptor
 */
static void ppc440spe_desc_set_src_addr(struct ppc440spe_adma_desc_slot *desc,
					struct ppc440spe_adma_chan *chan,
					int src_idx, dma_addr_t addrh,
					dma_addr_t addrl)
{
	struct dma_cdb *dma_hw_desc;
	struct xor_cb *xor_hw_desc;
	phys_addr_t addr64, tmplow, tmphi;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		if (!addrh) {
			addr64 = addrl;
			tmphi = (addr64 >> 32);
			tmplow = (addr64 & 0xFFFFFFFF);
		} else {
			tmphi = addrh;
			tmplow = addrl;
		}
		dma_hw_desc = desc->hw_desc;
		dma_hw_desc->sg1l = cpu_to_le32((u32)tmplow);
		dma_hw_desc->sg1u |= cpu_to_le32((u32)tmphi);
		break;
	case PPC440SPE_XOR_ID:
		xor_hw_desc = desc->hw_desc;
		xor_hw_desc->ops[src_idx].l = addrl;
		xor_hw_desc->ops[src_idx].h |= addrh;
		break;
	}
}
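
/*
 * Example: a 36-bit physical source address 0x1_2345_6780 passed in via
 * addrl with addrh == 0 is split above into tmphi = 0x1 and
 * tmplow = 0x2345_6780 before being merged into the sg1u/sg1l fields of
 * the CDB.
 */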

/**
 * ppc440spe_desc_set_src_mult - set source address mult into the descriptor
 */
static void ppc440spe_desc_set_src_mult(struct ppc440spe_adma_desc_slot *desc,
			struct ppc440spe_adma_chan *chan, u32 mult_index,
			int sg_index, unsigned char mult_value)
{
	struct dma_cdb *dma_hw_desc;
	struct xor_cb *xor_hw_desc;
	u32 *psgu;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		dma_hw_desc = desc->hw_desc;

		switch (sg_index) {
		/* for RXOR operations set the multiplier
		 * into the source cued address
		 */
		case DMA_CDB_SG_SRC:
			psgu = &dma_hw_desc->sg1u;
			break;
		/* for WXOR operations set the multiplier
		 * into the destination cued address(es)
		 */
		case DMA_CDB_SG_DST1:
			psgu = &dma_hw_desc->sg2u;
			break;
		case DMA_CDB_SG_DST2:
			psgu = &dma_hw_desc->sg3u;
			break;
		default:
			BUG();
		}

		*psgu |= cpu_to_le32(mult_value << mult_index);
		break;
	case PPC440SPE_XOR_ID:
		xor_hw_desc = desc->hw_desc;
		break;
	default:
		BUG();
	}
}

/**
 * ppc440spe_desc_set_dest_addr - set destination address into the descriptor
 */
static void ppc440spe_desc_set_dest_addr(struct ppc440spe_adma_desc_slot *desc,
					 struct ppc440spe_adma_chan *chan,
					 dma_addr_t addrh, dma_addr_t addrl,
					 u32 dst_idx)
{
	struct dma_cdb *dma_hw_desc;
	struct xor_cb *xor_hw_desc;
	phys_addr_t addr64, tmphi, tmplow;
	u32 *psgu, *psgl;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		if (!addrh) {
			addr64 = addrl;
			tmphi = (addr64 >> 32);
			tmplow = (addr64 & 0xFFFFFFFF);
		} else {
			tmphi = addrh;
			tmplow = addrl;
		}
		dma_hw_desc = desc->hw_desc;

		psgu = dst_idx ? &dma_hw_desc->sg3u : &dma_hw_desc->sg2u;
		psgl = dst_idx ? &dma_hw_desc->sg3l : &dma_hw_desc->sg2l;

		*psgl = cpu_to_le32((u32)tmplow);
		*psgu |= cpu_to_le32((u32)tmphi);
		break;
	case PPC440SPE_XOR_ID:
		xor_hw_desc = desc->hw_desc;
		xor_hw_desc->cbtal = addrl;
		xor_hw_desc->cbtah |= addrh;
		break;
	}
}

/**
 * ppc440spe_desc_set_byte_count - set the number of data bytes involved
 * in the operation
 */
static void ppc440spe_desc_set_byte_count(struct ppc440spe_adma_desc_slot *desc,
					  struct ppc440spe_adma_chan *chan,
					  u32 byte_count)
{
	struct dma_cdb *dma_hw_desc;
	struct xor_cb *xor_hw_desc;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		dma_hw_desc = desc->hw_desc;
		dma_hw_desc->cnt = cpu_to_le32(byte_count);
		break;
	case PPC440SPE_XOR_ID:
		xor_hw_desc = desc->hw_desc;
		xor_hw_desc->cbbc = byte_count;
		break;
	}
}

/**
 * ppc440spe_desc_set_rxor_block_size - set RXOR block size
 */
static inline void ppc440spe_desc_set_rxor_block_size(u32 byte_count)
{
	/* assume that byte_count is aligned on a 512-byte boundary;
	 * thus write it directly to the register (bits 23:31 are
	 * reserved there).
	 */
	dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CF2H, byte_count);
}

/**
 * ppc440spe_desc_set_dcheck - set CHECK pattern
 */
static void ppc440spe_desc_set_dcheck(struct ppc440spe_adma_desc_slot *desc,
				struct ppc440spe_adma_chan *chan, u8 *qword)
{
	struct dma_cdb *dma_hw_desc;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		dma_hw_desc = desc->hw_desc;
		iowrite32(qword[0], &dma_hw_desc->sg3l);
		iowrite32(qword[4], &dma_hw_desc->sg3u);
		iowrite32(qword[8], &dma_hw_desc->sg2l);
		iowrite32(qword[12], &dma_hw_desc->sg2u);
		break;
	default:
		BUG();
	}
}

/**
 * ppc440spe_xor_set_link - set link address in xor CB
 */
static void ppc440spe_xor_set_link(struct ppc440spe_adma_desc_slot *prev_desc,
				   struct ppc440spe_adma_desc_slot *next_desc)
{
	struct xor_cb *xor_hw_desc = prev_desc->hw_desc;

	if (unlikely(!next_desc || !(next_desc->phys))) {
		printk(KERN_ERR "%s: next_desc=0x%p; next_desc->phys=0x%llx\n",
			__func__, next_desc,
			next_desc ? next_desc->phys : 0);
		BUG();
	}

	xor_hw_desc->cbs = 0;
	xor_hw_desc->cblal = next_desc->phys;
	xor_hw_desc->cblah = 0;
	xor_hw_desc->cbc |= XOR_CBCR_LNK_BIT;
}

/**
 * ppc440spe_desc_set_link - set the address of the descriptor following this
 * descriptor in the chain
 */
static void ppc440spe_desc_set_link(struct ppc440spe_adma_chan *chan,
				    struct ppc440spe_adma_desc_slot *prev_desc,
				    struct ppc440spe_adma_desc_slot *next_desc)
{
	unsigned long flags;
	struct ppc440spe_adma_desc_slot *tail = next_desc;

	if (unlikely(!prev_desc || !next_desc ||
		(prev_desc->hw_next && prev_desc->hw_next != next_desc))) {
		/* If the previous next is overwritten something is wrong.
		 * Though we may refetch from append to initiate list
		 * processing; in that case it's ok.
		 */
		printk(KERN_ERR "%s: prev_desc=0x%p; next_desc=0x%p; "
			"prev->hw_next=0x%p\n", __func__, prev_desc,
			next_desc, prev_desc ? prev_desc->hw_next : 0);
		BUG();
	}

	local_irq_save(flags);

	/* do s/w chaining both for DMA and XOR descriptors */
	prev_desc->hw_next = next_desc;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		break;
	case PPC440SPE_XOR_ID:
		/* bind the descriptor to the chain */
		while (tail->hw_next)
			tail = tail->hw_next;
		xor_last_linked = tail;

		if (prev_desc == xor_last_submit)
			/* do not link to the last submitted CB */
			break;
		ppc440spe_xor_set_link(prev_desc, next_desc);
		break;
	}

	local_irq_restore(flags);
}

/**
 * ppc440spe_desc_get_src_addr - extract the source address from the descriptor
 */
static u32 ppc440spe_desc_get_src_addr(struct ppc440spe_adma_desc_slot *desc,
				struct ppc440spe_adma_chan *chan, int src_idx)
{
	struct dma_cdb *dma_hw_desc;
	struct xor_cb *xor_hw_desc;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		dma_hw_desc = desc->hw_desc;
		/* May have 0, 1, 2, or 3 sources */
		switch (dma_hw_desc->opc) {
		case DMA_CDB_OPC_NO_OP:
		case DMA_CDB_OPC_DFILL128:
			return 0;
		case DMA_CDB_OPC_DCHECK128:
			if (unlikely(src_idx)) {
				printk(KERN_ERR "%s: try to get %d source for"
				    " DCHECK128\n", __func__, src_idx);
				BUG();
			}
			return le32_to_cpu(dma_hw_desc->sg1l);
		case DMA_CDB_OPC_MULTICAST:
		case DMA_CDB_OPC_MV_SG1_SG2:
			if (unlikely(src_idx > 2)) {
				printk(KERN_ERR "%s: try to get %d source from"
				    " DMA descr\n", __func__, src_idx);
				BUG();
			}
			if (src_idx) {
				if (le32_to_cpu(dma_hw_desc->sg1u) &
				    DMA_CUED_XOR_WIN_MSK) {
					u8 region;

					if (src_idx == 1)
						return le32_to_cpu(
						    dma_hw_desc->sg1l) +
							desc->unmap_len;

					region = (le32_to_cpu(
					    dma_hw_desc->sg1u)) >>
						DMA_CUED_REGION_OFF;

					region &= DMA_CUED_REGION_MSK;
					switch (region) {
					case DMA_RXOR123:
						return le32_to_cpu(
						    dma_hw_desc->sg1l) +
							(desc->unmap_len << 1);
					case DMA_RXOR124:
						return le32_to_cpu(
						    dma_hw_desc->sg1l) +
							(desc->unmap_len * 3);
					case DMA_RXOR125:
						return le32_to_cpu(
						    dma_hw_desc->sg1l) +
							(desc->unmap_len << 2);
					default:
						printk(KERN_ERR
						    "%s: try to"
						    " get src3 for region %02x"
						    " PPC440SPE_DESC_RXOR12?\n",
						    __func__, region);
						BUG();
					}
				} else {
					printk(KERN_ERR
						"%s: try to get %d"
						" source for non-cued descr\n",
						__func__, src_idx);
					BUG();
				}
			}
			return le32_to_cpu(dma_hw_desc->sg1l);
		default:
			printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
				__func__, dma_hw_desc->opc);
			BUG();
		}
		return le32_to_cpu(dma_hw_desc->sg1l);
	case PPC440SPE_XOR_ID:
		/* May have up to 16 sources */
		xor_hw_desc = desc->hw_desc;
		return xor_hw_desc->ops[src_idx].l;
	}
	return 0;
}

/**
 * ppc440spe_desc_get_dest_addr - extract the destination address from the
 * descriptor
 */
static u32 ppc440spe_desc_get_dest_addr(struct ppc440spe_adma_desc_slot *desc,
					struct ppc440spe_adma_chan *chan,
					int idx)
{
	struct dma_cdb *dma_hw_desc;
	struct xor_cb *xor_hw_desc;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		dma_hw_desc = desc->hw_desc;

		if (likely(!idx))
			return le32_to_cpu(dma_hw_desc->sg2l);
		return le32_to_cpu(dma_hw_desc->sg3l);
	case PPC440SPE_XOR_ID:
		xor_hw_desc = desc->hw_desc;
		return xor_hw_desc->cbtal;
	}
	return 0;
}

/**
 * ppc440spe_desc_get_src_num - extract the number of source addresses from
 * the descriptor
 */
static u32 ppc440spe_desc_get_src_num(struct ppc440spe_adma_desc_slot *desc,
				      struct ppc440spe_adma_chan *chan)
{
	struct dma_cdb *dma_hw_desc;
	struct xor_cb *xor_hw_desc;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		dma_hw_desc = desc->hw_desc;

		switch (dma_hw_desc->opc) {
		case DMA_CDB_OPC_NO_OP:
		case DMA_CDB_OPC_DFILL128:
			return 0;
		case DMA_CDB_OPC_DCHECK128:
			return 1;
		case DMA_CDB_OPC_MV_SG1_SG2:
		case DMA_CDB_OPC_MULTICAST:
			/*
			 * Only for RXOR operations do we have more than
			 * one source
			 */
			if (le32_to_cpu(dma_hw_desc->sg1u) &
			    DMA_CUED_XOR_WIN_MSK) {
				/* RXOR op, there are 2 or 3 sources */
				if (((le32_to_cpu(dma_hw_desc->sg1u) >>
				    DMA_CUED_REGION_OFF) &
				      DMA_CUED_REGION_MSK) == DMA_RXOR12) {
					/* RXOR 1-2 */
					return 2;
				} else {
					/* RXOR 1-2-3/1-2-4/1-2-5 */
					return 3;
				}
			}
			return 1;
		default:
			printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
				__func__, dma_hw_desc->opc);
			BUG();
		}
	case PPC440SPE_XOR_ID:
		/* up to 16 sources */
		xor_hw_desc = desc->hw_desc;
		return xor_hw_desc->cbc & XOR_CDCR_OAC_MSK;
	default:
		BUG();
	}
	return 0;
}

/**
 * ppc440spe_desc_get_dst_num - get the number of destination addresses in
 * this descriptor
 */
static u32 ppc440spe_desc_get_dst_num(struct ppc440spe_adma_desc_slot *desc,
				      struct ppc440spe_adma_chan *chan)
{
	struct dma_cdb *dma_hw_desc;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		/* May be 1 or 2 destinations */
		dma_hw_desc = desc->hw_desc;
		switch (dma_hw_desc->opc) {
		case DMA_CDB_OPC_NO_OP:
		case DMA_CDB_OPC_DCHECK128:
			return 0;
		case DMA_CDB_OPC_MV_SG1_SG2:
		case DMA_CDB_OPC_DFILL128:
			return 1;
		case DMA_CDB_OPC_MULTICAST:
			if (desc->dst_cnt == 2)
				return 2;
			else
				return 1;
		default:
			printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
				__func__, dma_hw_desc->opc);
			BUG();
		}
	case PPC440SPE_XOR_ID:
		/* Always only 1 destination */
		return 1;
	default:
		BUG();
	}
	return 0;
}

/**
 * ppc440spe_desc_get_link - get the address of the descriptor that
 * follows this one
 */
static inline u32 ppc440spe_desc_get_link(struct ppc440spe_adma_desc_slot *desc,
					  struct ppc440spe_adma_chan *chan)
{
	if (!desc->hw_next)
		return 0;

	return desc->hw_next->phys;
}

/**
 * ppc440spe_desc_is_aligned - check alignment
 */
static inline int ppc440spe_desc_is_aligned(
	struct ppc440spe_adma_desc_slot *desc, int num_slots)
{
	return (desc->idx & (num_slots - 1)) ? 0 : 1;
}

/**
 * ppc440spe_chan_xor_slot_count - get the number of slots necessary for
 * XOR operation
 */
static int ppc440spe_chan_xor_slot_count(size_t len, int src_cnt,
					 int *slots_per_op)
{
	int slot_cnt;

	/* each XOR descriptor provides up to 16 source operands */
	slot_cnt = *slots_per_op = (src_cnt + XOR_MAX_OPS - 1)/XOR_MAX_OPS;

	if (likely(len <= PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT))
		return slot_cnt;

	printk(KERN_ERR "%s: len %zu > max %d !!\n",
		__func__, len, PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
	BUG();
	return slot_cnt;
}

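/*
 * Worked example (illustrative) for the state machine below: three
 * sources {S, S + len, S + 2*len} drive the walk 0 -> 1 -> 2, are
 * counted as a single RXOR123 region (addr_count == 1) and thus need
 * just one descriptor slot.
 */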
/**
 * ppc440spe_dma2_pq_slot_count - get the number of slots necessary for
 * DMA2 PQ operation
 */
static int ppc440spe_dma2_pq_slot_count(dma_addr_t *srcs,
					int src_cnt, size_t len)
{
	signed long long order = 0;
	int state = 0;
	int addr_count = 0;
	int i;
	for (i = 1; i < src_cnt; i++) {
		dma_addr_t cur_addr = srcs[i];
		dma_addr_t old_addr = srcs[i-1];
		switch (state) {
		case 0:
			if (cur_addr == old_addr + len) {
				/* direct RXOR */
				order = 1;
				state = 1;
				if (i == src_cnt-1)
					addr_count++;
			} else if (old_addr == cur_addr + len) {
				/* reverse RXOR */
				order = -1;
				state = 1;
				if (i == src_cnt-1)
					addr_count++;
			} else {
				state = 3;
			}
			break;
		case 1:
			if (i == src_cnt-2 || (order == -1
				&& cur_addr != old_addr - len)) {
				order = 0;
				state = 0;
				addr_count++;
			} else if (cur_addr == old_addr + len*order) {
				state = 2;
				if (i == src_cnt-1)
					addr_count++;
			} else if (cur_addr == old_addr + 2*len) {
				state = 2;
				if (i == src_cnt-1)
					addr_count++;
			} else if (cur_addr == old_addr + 3*len) {
				state = 2;
				if (i == src_cnt-1)
					addr_count++;
			} else {
				order = 0;
				state = 0;
				addr_count++;
			}
			break;
		case 2:
			order = 0;
			state = 0;
			addr_count++;
			break;
		}
		if (state == 3)
			break;
	}
	if (src_cnt <= 1 || (state != 1 && state != 2)) {
		pr_err("%s: src_cnt=%d, state=%d, addr_count=%d, order=%lld\n",
			__func__, src_cnt, state, addr_count, order);
		for (i = 0; i < src_cnt; i++)
			pr_err("\t[%d] 0x%llx\n", i, srcs[i]);
		BUG();
	}

	return (addr_count + XOR_MAX_OPS - 1) / XOR_MAX_OPS;
}


/******************************************************************************
 * ADMA channel low-level routines
 ******************************************************************************/

static u32
ppc440spe_chan_get_current_descriptor(struct ppc440spe_adma_chan *chan);
static void ppc440spe_chan_append(struct ppc440spe_adma_chan *chan);

/**
 * ppc440spe_adma_device_clear_eot_status - interrupt ack to XOR or DMA engine
 */
static void ppc440spe_adma_device_clear_eot_status(
					struct ppc440spe_adma_chan *chan)
{
	struct dma_regs *dma_reg;
	struct xor_regs *xor_reg;
	u8 *p = chan->device->dma_desc_pool_virt;
	struct dma_cdb *cdb;
	u32 rv, i;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		/* read FIFO to ack */
		dma_reg = chan->device->dma_reg;
		while ((rv = ioread32(&dma_reg->csfpl))) {
			i = rv & DMA_CDB_ADDR_MSK;
			cdb = (struct dma_cdb *)&p[i -
			    (u32)chan->device->dma_desc_pool];

			/* Clear the opcode to ack. This is necessary for
			 * ZeroSum operations only
			 */
			cdb->opc = 0;

			if (test_bit(PPC440SPE_RXOR_RUN,
			    &ppc440spe_rxor_state)) {
				/* probably this is a completed RXOR op;
				 * get the pointer to the CDB using the fact
				 * that physical and virtual addresses of a
				 * CDB in the pools have the same offsets
				 */
				if (le32_to_cpu(cdb->sg1u) &
				    DMA_CUED_XOR_BASE) {
					/* this is a RXOR */
					clear_bit(PPC440SPE_RXOR_RUN,
						  &ppc440spe_rxor_state);
				}
			}

			if (rv & DMA_CDB_STATUS_MSK) {
				/* ZeroSum check failed
				 */
				struct ppc440spe_adma_desc_slot *iter;
				dma_addr_t phys = rv & ~DMA_CDB_MSK;

				/*
				 * Update the status of the corresponding
				 * descriptor.
				 */
				list_for_each_entry(iter, &chan->chain,
				    chain_node) {
					if (iter->phys == phys)
						break;
				}
				/*
				 * if we cannot find the corresponding
				 * slot it's a bug
				 */
				BUG_ON(&iter->chain_node == &chan->chain);

				if (iter->xor_check_result) {
					if (test_bit(PPC440SPE_DESC_PCHECK,
						     &iter->flags)) {
						*iter->xor_check_result |=
							SUM_CHECK_P_RESULT;
					} else
					if (test_bit(PPC440SPE_DESC_QCHECK,
						     &iter->flags)) {
						*iter->xor_check_result |=
							SUM_CHECK_Q_RESULT;
					} else
						BUG();
				}
			}
		}

		rv = ioread32(&dma_reg->dsts);
		if (rv) {
			pr_err("DMA%d err status: 0x%x\n",
			       chan->device->id, rv);
			/* write back to clear */
			iowrite32(rv, &dma_reg->dsts);
		}
		break;
	case PPC440SPE_XOR_ID:
		/* reset status bits to ack */
		xor_reg = chan->device->xor_reg;
		rv = ioread32be(&xor_reg->sr);
		iowrite32be(rv, &xor_reg->sr);

		if (rv & (XOR_IE_ICBIE_BIT|XOR_IE_ICIE_BIT|XOR_IE_RPTIE_BIT)) {
			if (rv & XOR_IE_RPTIE_BIT) {
				/* Read PLB Timeout Error.
				 * Try to resubmit the CB
				 */
				u32 val = ioread32be(&xor_reg->ccbalr);

				iowrite32be(val, &xor_reg->cblalr);

				val = ioread32be(&xor_reg->crsr);
				iowrite32be(val | XOR_CRSR_XAE_BIT,
					    &xor_reg->crsr);
			} else
				pr_err("XOR ERR 0x%x status\n", rv);
			break;
		}

		/* if the XORcore is idle, but there are unprocessed CBs,
		 * then refetch the s/w chain here
		 */
		if (!(ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT) &&
		    do_xor_refetch)
			ppc440spe_chan_append(chan);
		break;
	}
}

/**
 * ppc440spe_chan_is_busy - get the channel status
 */
static int ppc440spe_chan_is_busy(struct ppc440spe_adma_chan *chan)
{
	struct dma_regs *dma_reg;
	struct xor_regs *xor_reg;
	int busy = 0;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		dma_reg = chan->device->dma_reg;
		/* if the command FIFO's head and tail pointers are equal
		 * and the status tail is the same as the command, then the
		 * channel is free
		 */
		if (ioread16(&dma_reg->cpfhp) != ioread16(&dma_reg->cpftp) ||
		    ioread16(&dma_reg->cpftp) != ioread16(&dma_reg->csftp))
			busy = 1;
		break;
	case PPC440SPE_XOR_ID:
		/* use the special status bit for the XORcore
		 */
		xor_reg = chan->device->xor_reg;
		busy = (ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT) ? 1 : 0;
		break;
	}

	return busy;
}

/**
 * ppc440spe_chan_set_first_xor_descriptor - init XORcore chain
 */
static void ppc440spe_chan_set_first_xor_descriptor(
				struct ppc440spe_adma_chan *chan,
				struct ppc440spe_adma_desc_slot *next_desc)
{
	struct xor_regs *xor_reg = chan->device->xor_reg;

	if (ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT)
		printk(KERN_INFO "%s: Warn: XORcore is running "
			"when trying to set the first CDB!\n",
			__func__);

	xor_last_submit = xor_last_linked = next_desc;

	iowrite32be(XOR_CRSR_64BA_BIT, &xor_reg->crsr);

	iowrite32be(next_desc->phys, &xor_reg->cblalr);
	iowrite32be(0, &xor_reg->cblahr);
	iowrite32be(ioread32be(&xor_reg->cbcr) | XOR_CBCR_LNK_BIT,
		    &xor_reg->cbcr);

	chan->hw_chain_inited = 1;
}

/**
 * ppc440spe_dma_put_desc - put DMA0,1 descriptor to FIFO.
 * called with irqs disabled
 */
static void ppc440spe_dma_put_desc(struct ppc440spe_adma_chan *chan,
				   struct ppc440spe_adma_desc_slot *desc)
{
	u32 pcdb;
	struct dma_regs *dma_reg = chan->device->dma_reg;

	pcdb = desc->phys;
	if (!test_bit(PPC440SPE_DESC_INT, &desc->flags))
		pcdb |= DMA_CDB_NO_INT;

	chan_last_sub[chan->device->id] = desc;

	ADMA_LL_DBG(print_cb(chan, desc->hw_desc));

	iowrite32(pcdb, &dma_reg->cpfpl);
}

/**
 * ppc440spe_chan_append - update the h/w chain in the channel
 */
static void ppc440spe_chan_append(struct ppc440spe_adma_chan *chan)
{
	struct xor_regs *xor_reg;
	struct ppc440spe_adma_desc_slot *iter;
	struct xor_cb *xcb;
	u32 cur_desc;
	unsigned long flags;

	local_irq_save(flags);

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		cur_desc = ppc440spe_chan_get_current_descriptor(chan);

		if (likely(cur_desc)) {
			iter = chan_last_sub[chan->device->id];
			BUG_ON(!iter);
		} else {
			/* first peer */
			iter = chan_first_cdb[chan->device->id];
			BUG_ON(!iter);
			ppc440spe_dma_put_desc(chan, iter);
			chan->hw_chain_inited = 1;
		}

		/* is there something new to append */
		if (!iter->hw_next)
			break;

		/* flush descriptors from the s/w queue to the fifo */
		list_for_each_entry_continue(iter, &chan->chain, chain_node) {
			ppc440spe_dma_put_desc(chan, iter);
			if (!iter->hw_next)
				break;
		}
		break;
	case PPC440SPE_XOR_ID:
		/* update h/w links and refetch */
		if (!xor_last_submit->hw_next)
			break;

		xor_reg = chan->device->xor_reg;
		/* the last linked CDB has to generate an interrupt so
		 * that we are able to append the next lists to the h/w
		 * regardless of the XOR engine state at the moment of
		 * appending these next lists
		 */
		xcb = xor_last_linked->hw_desc;
		xcb->cbc |= XOR_CBCR_CBCE_BIT;

		if (!(ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT)) {
			/* XORcore is idle. Refetch now */
			do_xor_refetch = 0;
			ppc440spe_xor_set_link(xor_last_submit,
				xor_last_submit->hw_next);

			ADMA_LL_DBG(print_cb_list(chan,
				xor_last_submit->hw_next));

			xor_last_submit = xor_last_linked;
			iowrite32be(ioread32be(&xor_reg->crsr) |
				    XOR_CRSR_RCBE_BIT | XOR_CRSR_64BA_BIT,
				    &xor_reg->crsr);
		} else {
			/* XORcore is running. Refetch later in the handler */
			do_xor_refetch = 1;
		}

		break;
	}

	local_irq_restore(flags);
}

/**
 * ppc440spe_chan_get_current_descriptor - get the currently executed descriptor
 */
static u32
ppc440spe_chan_get_current_descriptor(struct ppc440spe_adma_chan *chan)
{
	struct dma_regs *dma_reg;
	struct xor_regs *xor_reg;

	if (unlikely(!chan->hw_chain_inited))
		/* h/w descriptor chain is not initialized yet */
		return 0;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		dma_reg = chan->device->dma_reg;
		return ioread32(&dma_reg->acpl) & (~DMA_CDB_MSK);
	case PPC440SPE_XOR_ID:
		xor_reg = chan->device->xor_reg;
		return ioread32be(&xor_reg->ccbalr);
	}
	return 0;
}

/**
 * ppc440spe_chan_run - enable the channel
 */
static void ppc440spe_chan_run(struct ppc440spe_adma_chan *chan)
{
	struct xor_regs *xor_reg;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		/* DMAs are always enabled, do nothing */
		break;
	case PPC440SPE_XOR_ID:
		/* drain write buffer */
		xor_reg = chan->device->xor_reg;

		/* fetch descriptor pointed to in <link> */
		iowrite32be(XOR_CRSR_64BA_BIT | XOR_CRSR_XAE_BIT,
			    &xor_reg->crsr);
		break;
	}
}

/******************************************************************************
 * ADMA device level
 ******************************************************************************/

static void ppc440spe_chan_start_null_xor(struct ppc440spe_adma_chan *chan);
static int ppc440spe_adma_alloc_chan_resources(struct dma_chan *chan);

static dma_cookie_t
ppc440spe_adma_tx_submit(struct dma_async_tx_descriptor *tx);

static void ppc440spe_adma_set_dest(struct ppc440spe_adma_desc_slot *tx,
				    dma_addr_t addr, int index);
static void
ppc440spe_adma_memcpy_xor_set_src(struct ppc440spe_adma_desc_slot *tx,
				  dma_addr_t addr, int index);

static void
ppc440spe_adma_pq_set_dest(struct ppc440spe_adma_desc_slot *tx,
			   dma_addr_t *paddr, unsigned long flags);
static void
ppc440spe_adma_pq_set_src(struct ppc440spe_adma_desc_slot *tx,
			  dma_addr_t addr, int index);
static void
ppc440spe_adma_pq_set_src_mult(struct ppc440spe_adma_desc_slot *tx,
			       unsigned char mult, int index, int dst_pos);
static void
ppc440spe_adma_pqzero_sum_set_dest(struct ppc440spe_adma_desc_slot *tx,
				   dma_addr_t paddr, dma_addr_t qaddr);

static struct page *ppc440spe_rxor_srcs[32];

/**
 * ppc440spe_can_rxor - check if the operands may be processed with RXOR
 */
static int ppc440spe_can_rxor(struct page **srcs, int src_cnt, size_t len)
{
	int i, order = 0, state = 0;
	int idx = 0;

	if (unlikely(!(src_cnt > 1)))
		return 0;

	BUG_ON(src_cnt > ARRAY_SIZE(ppc440spe_rxor_srcs));

	/* Skip holes in the source list before checking */
	for (i = 0; i < src_cnt; i++) {
		if (!srcs[i])
			continue;
		ppc440spe_rxor_srcs[idx++] = srcs[i];
	}
	src_cnt = idx;

	for (i = 1; i < src_cnt; i++) {
		char *cur_addr = page_address(ppc440spe_rxor_srcs[i]);
		char *old_addr = page_address(ppc440spe_rxor_srcs[i - 1]);

		switch (state) {
		case 0:
			if (cur_addr == old_addr + len) {
				/* direct RXOR */
				order = 1;
				state = 1;
			} else if (old_addr == cur_addr + len) {
				/* reverse RXOR */
				order = -1;
				state = 1;
			} else
				goto out;
			break;
		case 1:
			if ((i == src_cnt - 2) ||
			    (order == -1 && cur_addr != old_addr - len)) {
				order = 0;
				state = 0;
			} else if ((cur_addr == old_addr + len * order) ||
				   (cur_addr == old_addr + 2 * len) ||
				   (cur_addr == old_addr + 3 * len)) {
				state = 2;
			} else {
				order = 0;
				state = 0;
			}
			break;
		case 2:
			order = 0;
			state = 0;
			break;
		}
	}

out:
	if (state == 1 || state == 2)
		return 1;

	return 0;
}

/**
 * ppc440spe_adma_estimate - estimate the efficiency of processing
 * the given operation on this channel. It's assumed that 'chan' is
 * capable of processing the 'cap' type of operation.
 * @chan: channel to use
 * @cap: type of transaction
 * @dst_lst: array of destination pointers
 * @dst_cnt: number of destination operands
 * @src_lst: array of source pointers
 * @src_cnt: number of source operands
 * @src_sz: size of each source operand
 */
static int ppc440spe_adma_estimate(struct dma_chan *chan,
	enum dma_transaction_type cap, struct page **dst_lst, int dst_cnt,
	struct page **src_lst, int src_cnt, size_t src_sz)
{
	int ef = 1;

	if (cap == DMA_PQ || cap == DMA_PQ_VAL) {
		/* If the RAID-6 capabilities were not activated, don't try
		 * to use them
		 */
		if (unlikely(!ppc440spe_r6_enabled))
			return -1;
	}
	/* In the current implementation of the ppc440spe ADMA driver it
	 * makes sense to pick out only the pq case, because it may be
	 * processed:
	 * (1) either using the Biskup method on DMA2;
	 * (2) or on DMA0/1.
	 * Thus we favour (1) if the sources are suitable; else let it
	 * be processed on one of the DMA0/1 engines.
	 * In the sum_product case, where the destination is also a
	 * source, process it on DMA0/1 only.
	 */
	if (cap == DMA_PQ && chan->chan_id == PPC440SPE_XOR_ID) {

		if (dst_cnt == 1 && src_cnt == 2 && dst_lst[0] == src_lst[1])
			ef = 0; /* sum_product case, process on DMA0/1 */
		else if (ppc440spe_can_rxor(src_lst, src_cnt, src_sz))
			ef = 3; /* override (DMA0/1 + idle) */
		else
			ef = 0; /* can't process on DMA2 if !rxor */
	}

	/* channel idleness increases the priority */
	if (likely(ef) &&
	    !ppc440spe_chan_is_busy(to_ppc440spe_adma_chan(chan)))
		ef++;

	return ef;
}

struct dma_chan *
ppc440spe_async_tx_find_best_channel(enum dma_transaction_type cap,
	struct page **dst_lst, int dst_cnt, struct page **src_lst,
	int src_cnt, size_t src_sz)
{
	struct dma_chan *best_chan = NULL;
	struct ppc_dma_chan_ref *ref;
	int best_rank = -1;

	if (unlikely(!src_sz))
		return NULL;
	if (src_sz > PAGE_SIZE) {
		/*
		 * should a user of the api ever pass > PAGE_SIZE requests
		 * we sort out cases where temporary page-sized buffers
		 * are used.
		 */
		switch (cap) {
		case DMA_PQ:
			if (src_cnt == 1 && dst_lst[1] == src_lst[0])
				return NULL;
			if (src_cnt == 2 && dst_lst[1] == src_lst[1])
				return NULL;
			break;
		case DMA_PQ_VAL:
		case DMA_XOR_VAL:
			return NULL;
		default:
			break;
		}
	}

	list_for_each_entry(ref, &ppc440spe_adma_chan_list, node) {
		if (dma_has_cap(cap, ref->chan->device->cap_mask)) {
			int rank;

			rank = ppc440spe_adma_estimate(ref->chan, cap, dst_lst,
					dst_cnt, src_lst, src_cnt, src_sz);
			if (rank > best_rank) {
				best_rank = rank;
				best_chan = ref->chan;
			}
		}
	}

	return best_chan;
}
EXPORT_SYMBOL_GPL(ppc440spe_async_tx_find_best_channel);
1665 | ||
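/*
 * Usage sketch (illustrative, not part of the driver): a RAID client
 * could pick a channel for a PQ operation over src_cnt source pages as
 *
 *	chan = ppc440spe_async_tx_find_best_channel(DMA_PQ, dst_pages, 2,
 *			src_pages, src_cnt, PAGE_SIZE);
 *
 * and fall back to a synchronous CPU implementation when NULL is
 * returned (no capable channel, or an over-sized/unsupported request).
 */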
1666 | /** | |
1667 |  * ppc440spe_get_group_entry - get the group entry with index @entry_idx | |
1668 |  * @tdesc: the last allocated slot in the group | |
1669 | */ | |
1670 | static struct ppc440spe_adma_desc_slot * | |
1671 | ppc440spe_get_group_entry(struct ppc440spe_adma_desc_slot *tdesc, u32 entry_idx) | |
1672 | { | |
1673 | struct ppc440spe_adma_desc_slot *iter = tdesc->group_head; | |
1674 | int i = 0; | |
1675 | ||
1676 | if (entry_idx < 0 || entry_idx >= (tdesc->src_cnt + tdesc->dst_cnt)) { | |
1677 | 		printk(KERN_ERR "%s: entry_idx %d, src_cnt %d, dst_cnt %d\n", | |
1678 | __func__, entry_idx, tdesc->src_cnt, tdesc->dst_cnt); | |
1679 | BUG(); | |
1680 | } | |
1681 | ||
1682 | list_for_each_entry(iter, &tdesc->group_list, chain_node) { | |
1683 | if (i++ == entry_idx) | |
1684 | break; | |
1685 | } | |
1686 | return iter; | |
1687 | } | |
1688 | ||
1689 | /** | |
1690 | * ppc440spe_adma_free_slots - flags descriptor slots for reuse | |
1691 |  * @slot: first descriptor slot of the group to free | |
1692 |  * @chan: owning channel; the caller must hold &ppc440spe_chan->lock | |
1693 | */ | |
1694 | static void ppc440spe_adma_free_slots(struct ppc440spe_adma_desc_slot *slot, | |
1695 | struct ppc440spe_adma_chan *chan) | |
1696 | { | |
1697 | int stride = slot->slots_per_op; | |
1698 | ||
1699 | while (stride--) { | |
1700 | slot->slots_per_op = 0; | |
1701 | slot = list_entry(slot->slot_node.next, | |
1702 | struct ppc440spe_adma_desc_slot, | |
1703 | slot_node); | |
1704 | } | |
1705 | } | |
1706 | ||
1707 | static void ppc440spe_adma_unmap(struct ppc440spe_adma_chan *chan, | |
1708 | struct ppc440spe_adma_desc_slot *desc) | |
1709 | { | |
1710 | u32 src_cnt, dst_cnt; | |
1711 | dma_addr_t addr; | |
1712 | ||
1713 | /* | |
1714 | * get the number of sources & destination | |
1715 | * included in this descriptor and unmap | |
1716 | * them all | |
1717 | */ | |
1718 | src_cnt = ppc440spe_desc_get_src_num(desc, chan); | |
1719 | dst_cnt = ppc440spe_desc_get_dst_num(desc, chan); | |
1720 | ||
1721 | /* unmap destinations */ | |
1722 | if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | |
1723 | while (dst_cnt--) { | |
1724 | addr = ppc440spe_desc_get_dest_addr( | |
1725 | desc, chan, dst_cnt); | |
1726 | dma_unmap_page(chan->device->dev, | |
1727 | addr, desc->unmap_len, | |
1728 | DMA_FROM_DEVICE); | |
1729 | } | |
1730 | } | |
1731 | ||
1732 | /* unmap sources */ | |
1733 | if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | |
1734 | while (src_cnt--) { | |
1735 | addr = ppc440spe_desc_get_src_addr( | |
1736 | desc, chan, src_cnt); | |
1737 | dma_unmap_page(chan->device->dev, | |
1738 | addr, desc->unmap_len, | |
1739 | DMA_TO_DEVICE); | |
1740 | } | |
1741 | } | |
1742 | } | |
1743 | ||
1744 | /** | |
1745 | * ppc440spe_adma_run_tx_complete_actions - call functions to be called | |
1746 | * upon completion | |
1747 | */ | |
1748 | static dma_cookie_t ppc440spe_adma_run_tx_complete_actions( | |
1749 | struct ppc440spe_adma_desc_slot *desc, | |
1750 | struct ppc440spe_adma_chan *chan, | |
1751 | dma_cookie_t cookie) | |
1752 | { | |
1753 | int i; | |
1754 | ||
1755 | BUG_ON(desc->async_tx.cookie < 0); | |
1756 | if (desc->async_tx.cookie > 0) { | |
1757 | cookie = desc->async_tx.cookie; | |
1758 | desc->async_tx.cookie = 0; | |
1759 | ||
1760 | /* call the callback (must not sleep or submit new | |
1761 | * operations to this channel) | |
1762 | */ | |
1763 | if (desc->async_tx.callback) | |
1764 | desc->async_tx.callback( | |
1765 | desc->async_tx.callback_param); | |
1766 | ||
1767 | /* unmap dma addresses | |
1768 | * (unmap_single vs unmap_page?) | |
1769 | * | |
1770 | * actually, ppc's dma_unmap_page() functions are empty, so | |
1771 | * the following code is just for the sake of completeness | |
1772 | */ | |
1773 | if (chan && chan->needs_unmap && desc->group_head && | |
1774 | desc->unmap_len) { | |
1775 | struct ppc440spe_adma_desc_slot *unmap = | |
1776 | desc->group_head; | |
1777 | /* assume 1 slot per op always */ | |
1778 | u32 slot_count = unmap->slot_cnt; | |
1779 | ||
1780 | /* Run through the group list and unmap addresses */ | |
1781 | for (i = 0; i < slot_count; i++) { | |
1782 | BUG_ON(!unmap); | |
1783 | ppc440spe_adma_unmap(chan, unmap); | |
1784 | unmap = unmap->hw_next; | |
1785 | } | |
1786 | } | |
1787 | } | |
1788 | ||
1789 | /* run dependent operations */ | |
1790 | dma_run_dependencies(&desc->async_tx); | |
1791 | ||
1792 | return cookie; | |
1793 | } | |
1794 | ||
1795 | /** | |
1796 | * ppc440spe_adma_clean_slot - clean up CDB slot (if ack is set) | |
1797 | */ | |
1798 | static int ppc440spe_adma_clean_slot(struct ppc440spe_adma_desc_slot *desc, | |
1799 | struct ppc440spe_adma_chan *chan) | |
1800 | { | |
1801 | /* the client is allowed to attach dependent operations | |
1802 | * until 'ack' is set | |
1803 | */ | |
1804 | if (!async_tx_test_ack(&desc->async_tx)) | |
1805 | return 0; | |
1806 | ||
1807 | /* leave the last descriptor in the chain | |
1808 | * so we can append to it | |
1809 | */ | |
1810 | if (list_is_last(&desc->chain_node, &chan->chain) || | |
1811 | desc->phys == ppc440spe_chan_get_current_descriptor(chan)) | |
1812 | return 1; | |
1813 | ||
1814 | if (chan->device->id != PPC440SPE_XOR_ID) { | |
1815 | /* our DMA interrupt handler clears opc field of | |
1816 | * each processed descriptor. For all types of | |
1817 | * operations except for ZeroSum we do not actually | |
1818 | * need ack from the interrupt handler. ZeroSum is a | |
1819 | * special case since the result of this operation | |
1820 | * is available from the handler only, so if we see | |
1821 | * such type of descriptor (which is unprocessed yet) | |
1822 | * then leave it in chain. | |
1823 | */ | |
1824 | struct dma_cdb *cdb = desc->hw_desc; | |
1825 | if (cdb->opc == DMA_CDB_OPC_DCHECK128) | |
1826 | return 1; | |
1827 | } | |
1828 | ||
1829 | dev_dbg(chan->device->common.dev, "\tfree slot %llx: %d stride: %d\n", | |
1830 | desc->phys, desc->idx, desc->slots_per_op); | |
1831 | ||
1832 | list_del(&desc->chain_node); | |
1833 | ppc440spe_adma_free_slots(desc, chan); | |
1834 | return 0; | |
1835 | } | |
1836 | ||
1837 | /** | |
1838 |  * __ppc440spe_adma_slot_cleanup - common clean-up routine which runs | |
1839 |  *	through the channel's CDB list until it reaches the descriptor | |
1840 |  *	currently being processed. Once all CDBs of a group have completed, | |
1841 |  *	the corresponding callbacks (if any) are called and the slots | |
1842 |  *	are freed. | |
1843 | */ | |
1844 | static void __ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan) | |
1845 | { | |
1846 | struct ppc440spe_adma_desc_slot *iter, *_iter, *group_start = NULL; | |
1847 | dma_cookie_t cookie = 0; | |
1848 | u32 current_desc = ppc440spe_chan_get_current_descriptor(chan); | |
1849 | int busy = ppc440spe_chan_is_busy(chan); | |
1850 | int seen_current = 0, slot_cnt = 0, slots_per_op = 0; | |
1851 | ||
1852 | dev_dbg(chan->device->common.dev, "ppc440spe adma%d: %s\n", | |
1853 | chan->device->id, __func__); | |
1854 | ||
1855 | if (!current_desc) { | |
1856 | /* There were no transactions yet, so | |
1857 | * nothing to clean | |
1858 | */ | |
1859 | return; | |
1860 | } | |
1861 | ||
1862 | /* free completed slots from the chain starting with | |
1863 | * the oldest descriptor | |
1864 | */ | |
1865 | list_for_each_entry_safe(iter, _iter, &chan->chain, | |
1866 | chain_node) { | |
1867 | dev_dbg(chan->device->common.dev, "\tcookie: %d slot: %d " | |
1868 | "busy: %d this_desc: %#llx next_desc: %#x " | |
1869 | "cur: %#x ack: %d\n", | |
1870 | iter->async_tx.cookie, iter->idx, busy, iter->phys, | |
1871 | ppc440spe_desc_get_link(iter, chan), current_desc, | |
1872 | async_tx_test_ack(&iter->async_tx)); | |
1873 | prefetch(_iter); | |
1874 | prefetch(&_iter->async_tx); | |
1875 | ||
1876 | /* do not advance past the current descriptor loaded into the | |
1877 | 		 * hardware channel; subsequent descriptors are either in process | |
1878 | * or have not been submitted | |
1879 | */ | |
1880 | if (seen_current) | |
1881 | break; | |
1882 | ||
1883 | /* stop the search if we reach the current descriptor and the | |
1884 | * channel is busy, or if it appears that the current descriptor | |
1885 | * needs to be re-read (i.e. has been appended to) | |
1886 | */ | |
1887 | if (iter->phys == current_desc) { | |
1888 | BUG_ON(seen_current++); | |
1889 | if (busy || ppc440spe_desc_get_link(iter, chan)) { | |
1890 | /* not all descriptors of the group have | |
1891 | * been completed; exit. | |
1892 | */ | |
1893 | break; | |
1894 | } | |
1895 | } | |
1896 | ||
1897 | /* detect the start of a group transaction */ | |
1898 | if (!slot_cnt && !slots_per_op) { | |
1899 | slot_cnt = iter->slot_cnt; | |
1900 | slots_per_op = iter->slots_per_op; | |
1901 | if (slot_cnt <= slots_per_op) { | |
1902 | slot_cnt = 0; | |
1903 | slots_per_op = 0; | |
1904 | } | |
1905 | } | |
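		/* Illustrative example: a 3-slot group with slots_per_op == 1
		 * is detected here with slot_cnt == 3; slot_cnt then drops by
		 * slots_per_op on each pass below, and the group's completion
		 * actions run only once it reaches zero.
		 */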
1906 | ||
1907 | if (slot_cnt) { | |
1908 | if (!group_start) | |
1909 | group_start = iter; | |
1910 | slot_cnt -= slots_per_op; | |
1911 | } | |
1912 | ||
1913 | /* all the members of a group are complete */ | |
1914 | if (slots_per_op != 0 && slot_cnt == 0) { | |
1915 | struct ppc440spe_adma_desc_slot *grp_iter, *_grp_iter; | |
1916 | int end_of_chain = 0; | |
1917 | ||
1918 | /* clean up the group */ | |
1919 | slot_cnt = group_start->slot_cnt; | |
1920 | grp_iter = group_start; | |
1921 | list_for_each_entry_safe_from(grp_iter, _grp_iter, | |
1922 | &chan->chain, chain_node) { | |
1923 | ||
1924 | cookie = ppc440spe_adma_run_tx_complete_actions( | |
1925 | grp_iter, chan, cookie); | |
1926 | ||
1927 | slot_cnt -= slots_per_op; | |
1928 | end_of_chain = ppc440spe_adma_clean_slot( | |
1929 | grp_iter, chan); | |
1930 | if (end_of_chain && slot_cnt) { | |
1931 | /* Should wait for ZeroSum completion */ | |
1932 | if (cookie > 0) | |
1933 | chan->completed_cookie = cookie; | |
1934 | return; | |
1935 | } | |
1936 | ||
1937 | if (slot_cnt == 0 || end_of_chain) | |
1938 | break; | |
1939 | } | |
1940 | ||
1941 | /* the group should be complete at this point */ | |
1942 | BUG_ON(slot_cnt); | |
1943 | ||
1944 | slots_per_op = 0; | |
1945 | group_start = NULL; | |
1946 | if (end_of_chain) | |
1947 | break; | |
1948 | else | |
1949 | continue; | |
1950 | } else if (slots_per_op) /* wait for group completion */ | |
1951 | continue; | |
1952 | ||
1953 | cookie = ppc440spe_adma_run_tx_complete_actions(iter, chan, | |
1954 | cookie); | |
1955 | ||
1956 | if (ppc440spe_adma_clean_slot(iter, chan)) | |
1957 | break; | |
1958 | } | |
1959 | ||
1960 | BUG_ON(!seen_current); | |
1961 | ||
1962 | if (cookie > 0) { | |
1963 | chan->completed_cookie = cookie; | |
1964 | pr_debug("\tcompleted cookie %d\n", cookie); | |
1965 | } | |
1966 | ||
1967 | } | |
1968 | ||
1969 | /** | |
1970 | * ppc440spe_adma_tasklet - clean up watch-dog initiator | |
1971 | */ | |
1972 | static void ppc440spe_adma_tasklet(unsigned long data) | |
1973 | { | |
1974 | struct ppc440spe_adma_chan *chan = (struct ppc440spe_adma_chan *) data; | |
1975 | ||
1976 | spin_lock_nested(&chan->lock, SINGLE_DEPTH_NESTING); | |
1977 | __ppc440spe_adma_slot_cleanup(chan); | |
1978 | spin_unlock(&chan->lock); | |
1979 | } | |
1980 | ||
1981 | /** | |
1982 | * ppc440spe_adma_slot_cleanup - clean up scheduled initiator | |
1983 | */ | |
1984 | static void ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan) | |
1985 | { | |
1986 | spin_lock_bh(&chan->lock); | |
1987 | __ppc440spe_adma_slot_cleanup(chan); | |
1988 | spin_unlock_bh(&chan->lock); | |
1989 | } | |
1990 | ||
1991 | /** | |
1992 | * ppc440spe_adma_alloc_slots - allocate free slots (if any) | |
1993 | */ | |
1994 | static struct ppc440spe_adma_desc_slot *ppc440spe_adma_alloc_slots( | |
1995 | struct ppc440spe_adma_chan *chan, int num_slots, | |
1996 | int slots_per_op) | |
1997 | { | |
1998 | struct ppc440spe_adma_desc_slot *iter = NULL, *_iter; | |
1999 | struct ppc440spe_adma_desc_slot *alloc_start = NULL; | |
2000 | struct list_head chain = LIST_HEAD_INIT(chain); | |
2001 | int slots_found, retry = 0; | |
2002 | ||
2003 | ||
2004 | BUG_ON(!num_slots || !slots_per_op); | |
2005 | 	/* start the search from the last allocated descriptor; | |
2006 | 	 * if a contiguous allocation cannot be found, start searching | |
2007 | * from the beginning of the list | |
2008 | */ | |
2009 | retry: | |
2010 | slots_found = 0; | |
2011 | if (retry == 0) | |
2012 | iter = chan->last_used; | |
2013 | else | |
2014 | iter = list_entry(&chan->all_slots, | |
2015 | struct ppc440spe_adma_desc_slot, | |
2016 | slot_node); | |
2017 | list_for_each_entry_safe_continue(iter, _iter, &chan->all_slots, | |
2018 | slot_node) { | |
2019 | prefetch(_iter); | |
2020 | prefetch(&_iter->async_tx); | |
2021 | if (iter->slots_per_op) { | |
2022 | slots_found = 0; | |
2023 | continue; | |
2024 | } | |
2025 | ||
2026 | /* start the allocation if the slot is correctly aligned */ | |
2027 | if (!slots_found++) | |
2028 | alloc_start = iter; | |
2029 | ||
2030 | if (slots_found == num_slots) { | |
2031 | struct ppc440spe_adma_desc_slot *alloc_tail = NULL; | |
2032 | struct ppc440spe_adma_desc_slot *last_used = NULL; | |
2033 | ||
2034 | iter = alloc_start; | |
2035 | while (num_slots) { | |
2036 | int i; | |
2037 | /* pre-ack all but the last descriptor */ | |
2038 | if (num_slots != slots_per_op) | |
2039 | async_tx_ack(&iter->async_tx); | |
2040 | ||
2041 | list_add_tail(&iter->chain_node, &chain); | |
2042 | alloc_tail = iter; | |
2043 | iter->async_tx.cookie = 0; | |
2044 | iter->hw_next = NULL; | |
2045 | iter->flags = 0; | |
2046 | iter->slot_cnt = num_slots; | |
2047 | iter->xor_check_result = NULL; | |
2048 | for (i = 0; i < slots_per_op; i++) { | |
2049 | iter->slots_per_op = slots_per_op - i; | |
2050 | last_used = iter; | |
2051 | iter = list_entry(iter->slot_node.next, | |
2052 | struct ppc440spe_adma_desc_slot, | |
2053 | slot_node); | |
2054 | } | |
2055 | num_slots -= slots_per_op; | |
2056 | } | |
2057 | alloc_tail->group_head = alloc_start; | |
2058 | alloc_tail->async_tx.cookie = -EBUSY; | |
2059 | list_splice(&chain, &alloc_tail->group_list); | |
2060 | chan->last_used = last_used; | |
2061 | return alloc_tail; | |
2062 | } | |
2063 | } | |
2064 | if (!retry++) | |
2065 | goto retry; | |
2066 | ||
2067 | /* try to free some slots if the allocation fails */ | |
2068 | tasklet_schedule(&chan->irq_tasklet); | |
2069 | return NULL; | |
2070 | } | |
2071 | ||
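/*
 * Note on the allocator above (a summary derived from the code): the
 * search starts at chan->last_used, wraps to the head of 'all_slots'
 * exactly once, and must find 'num_slots' contiguous free slots. On
 * failure the clean-up tasklet is kicked in the hope of retiring
 * completed CDBs, and NULL is returned to the caller.
 */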
2072 | /** | |
2073 | * ppc440spe_adma_alloc_chan_resources - allocate pools for CDB slots | |
2074 | */ | |
2075 | static int ppc440spe_adma_alloc_chan_resources(struct dma_chan *chan) | |
2076 | { | |
2077 | struct ppc440spe_adma_chan *ppc440spe_chan; | |
2078 | struct ppc440spe_adma_desc_slot *slot = NULL; | |
2079 | char *hw_desc; | |
2080 | int i, db_sz; | |
2081 | int init; | |
2082 | ||
2083 | ppc440spe_chan = to_ppc440spe_adma_chan(chan); | |
2084 | init = ppc440spe_chan->slots_allocated ? 0 : 1; | |
2085 | chan->chan_id = ppc440spe_chan->device->id; | |
2086 | ||
2087 | /* Allocate descriptor slots */ | |
2088 | i = ppc440spe_chan->slots_allocated; | |
2089 | if (ppc440spe_chan->device->id != PPC440SPE_XOR_ID) | |
2090 | db_sz = sizeof(struct dma_cdb); | |
2091 | else | |
2092 | db_sz = sizeof(struct xor_cb); | |
2093 | ||
2094 | for (; i < (ppc440spe_chan->device->pool_size / db_sz); i++) { | |
2095 | slot = kzalloc(sizeof(struct ppc440spe_adma_desc_slot), | |
2096 | GFP_KERNEL); | |
2097 | if (!slot) { | |
2098 | printk(KERN_INFO "SPE ADMA Channel only initialized" | |
2099 | 				" %d descriptor slots\n", i--); | |
2100 | break; | |
2101 | } | |
2102 | ||
2103 | hw_desc = (char *) ppc440spe_chan->device->dma_desc_pool_virt; | |
2104 | slot->hw_desc = (void *) &hw_desc[i * db_sz]; | |
2105 | dma_async_tx_descriptor_init(&slot->async_tx, chan); | |
2106 | slot->async_tx.tx_submit = ppc440spe_adma_tx_submit; | |
2107 | INIT_LIST_HEAD(&slot->chain_node); | |
2108 | INIT_LIST_HEAD(&slot->slot_node); | |
2109 | INIT_LIST_HEAD(&slot->group_list); | |
2110 | slot->phys = ppc440spe_chan->device->dma_desc_pool + i * db_sz; | |
2111 | slot->idx = i; | |
2112 | ||
2113 | spin_lock_bh(&ppc440spe_chan->lock); | |
2114 | ppc440spe_chan->slots_allocated++; | |
2115 | list_add_tail(&slot->slot_node, &ppc440spe_chan->all_slots); | |
2116 | spin_unlock_bh(&ppc440spe_chan->lock); | |
2117 | } | |
2118 | ||
2119 | if (i && !ppc440spe_chan->last_used) { | |
2120 | ppc440spe_chan->last_used = | |
2121 | list_entry(ppc440spe_chan->all_slots.next, | |
2122 | struct ppc440spe_adma_desc_slot, | |
2123 | slot_node); | |
2124 | } | |
2125 | ||
2126 | dev_dbg(ppc440spe_chan->device->common.dev, | |
2127 | "ppc440spe adma%d: allocated %d descriptor slots\n", | |
2128 | ppc440spe_chan->device->id, i); | |
2129 | ||
2130 | /* initialize the channel and the chain with a null operation */ | |
2131 | if (init) { | |
2132 | switch (ppc440spe_chan->device->id) { | |
2133 | case PPC440SPE_DMA0_ID: | |
2134 | case PPC440SPE_DMA1_ID: | |
2135 | ppc440spe_chan->hw_chain_inited = 0; | |
2136 | /* Use WXOR for self-testing */ | |
2137 | if (!ppc440spe_r6_tchan) | |
2138 | ppc440spe_r6_tchan = ppc440spe_chan; | |
2139 | break; | |
2140 | case PPC440SPE_XOR_ID: | |
2141 | ppc440spe_chan_start_null_xor(ppc440spe_chan); | |
2142 | break; | |
2143 | default: | |
2144 | BUG(); | |
2145 | } | |
2146 | ppc440spe_chan->needs_unmap = 1; | |
2147 | } | |
2148 | ||
2149 | return (i > 0) ? i : -ENOMEM; | |
2150 | } | |
2151 | ||
2152 | /** | |
2153 | * ppc440spe_desc_assign_cookie - assign a cookie | |
2154 | */ | |
2155 | static dma_cookie_t ppc440spe_desc_assign_cookie( | |
2156 | struct ppc440spe_adma_chan *chan, | |
2157 | struct ppc440spe_adma_desc_slot *desc) | |
2158 | { | |
2159 | dma_cookie_t cookie = chan->common.cookie; | |
2160 | ||
2161 | cookie++; | |
2162 | if (cookie < 0) | |
2163 | cookie = 1; | |
2164 | chan->common.cookie = desc->async_tx.cookie = cookie; | |
2165 | return cookie; | |
2166 | } | |
2167 | ||
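/*
 * Worked example of the wrap-around above: dma_cookie_t is a signed
 * 32-bit value, so after cookie 0x7fffffff the increment goes negative
 * and the next assigned cookie restarts at 1 (cookie 0 is likewise
 * never handed out).
 */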
2168 | /** | |
2169 |  * ppc440spe_rxor_set_region - set the RXOR region mask for an operand | |
2170 | */ | |
2171 | static void ppc440spe_rxor_set_region(struct ppc440spe_adma_desc_slot *desc, | |
2172 | u8 xor_arg_no, u32 mask) | |
2173 | { | |
2174 | struct xor_cb *xcb = desc->hw_desc; | |
2175 | ||
2176 | xcb->ops[xor_arg_no].h |= mask; | |
2177 | } | |
2178 | ||
2179 | /** | |
2180 |  * ppc440spe_rxor_set_src - set the RXOR source address for an operand | |
2181 | */ | |
2182 | static void ppc440spe_rxor_set_src(struct ppc440spe_adma_desc_slot *desc, | |
2183 | u8 xor_arg_no, dma_addr_t addr) | |
2184 | { | |
2185 | struct xor_cb *xcb = desc->hw_desc; | |
2186 | ||
2187 | xcb->ops[xor_arg_no].h |= DMA_CUED_XOR_BASE; | |
2188 | xcb->ops[xor_arg_no].l = addr; | |
2189 | } | |
2190 | ||
2191 | /** | |
2192 |  * ppc440spe_rxor_set_mult - set the RXOR multiplier for an operand | |
2193 | */ | |
2194 | static void ppc440spe_rxor_set_mult(struct ppc440spe_adma_desc_slot *desc, | |
2195 | u8 xor_arg_no, u8 idx, u8 mult) | |
2196 | { | |
2197 | struct xor_cb *xcb = desc->hw_desc; | |
2198 | ||
2199 | xcb->ops[xor_arg_no].h |= mult << (DMA_CUED_MULT1_OFF + idx * 8); | |
2200 | } | |
2201 | ||
2202 | /** | |
2203 |  * ppc440spe_adma_check_threshold - append CDBs to the h/w chain once the | |
2204 |  *	threshold has been reached | |
2205 | */ | |
2206 | static void ppc440spe_adma_check_threshold(struct ppc440spe_adma_chan *chan) | |
2207 | { | |
2208 | dev_dbg(chan->device->common.dev, "ppc440spe adma%d: pending: %d\n", | |
2209 | chan->device->id, chan->pending); | |
2210 | ||
2211 | if (chan->pending >= PPC440SPE_ADMA_THRESHOLD) { | |
2212 | chan->pending = 0; | |
2213 | ppc440spe_chan_append(chan); | |
2214 | } | |
2215 | } | |
2216 | ||
2217 | /** | |
2218 |  * ppc440spe_adma_tx_submit - submit a new descriptor group to the channel | |
2219 |  *	(the descriptors are not necessarily appended to the h/w | |
2220 |  *	chain right away) | |
2221 | */ | |
2222 | static dma_cookie_t ppc440spe_adma_tx_submit(struct dma_async_tx_descriptor *tx) | |
2223 | { | |
2224 | struct ppc440spe_adma_desc_slot *sw_desc; | |
2225 | struct ppc440spe_adma_chan *chan = to_ppc440spe_adma_chan(tx->chan); | |
2226 | struct ppc440spe_adma_desc_slot *group_start, *old_chain_tail; | |
2227 | int slot_cnt; | |
2228 | int slots_per_op; | |
2229 | dma_cookie_t cookie; | |
2230 | ||
2231 | sw_desc = tx_to_ppc440spe_adma_slot(tx); | |
2232 | ||
2233 | group_start = sw_desc->group_head; | |
2234 | slot_cnt = group_start->slot_cnt; | |
2235 | slots_per_op = group_start->slots_per_op; | |
2236 | ||
2237 | spin_lock_bh(&chan->lock); | |
2238 | ||
2239 | cookie = ppc440spe_desc_assign_cookie(chan, sw_desc); | |
2240 | ||
2241 | if (unlikely(list_empty(&chan->chain))) { | |
2242 | /* first peer */ | |
2243 | list_splice_init(&sw_desc->group_list, &chan->chain); | |
2244 | chan_first_cdb[chan->device->id] = group_start; | |
2245 | } else { | |
2246 | 		/* not the first peer; append the CDBs to the chain */ | |
2247 | old_chain_tail = list_entry(chan->chain.prev, | |
2248 | struct ppc440spe_adma_desc_slot, | |
2249 | chain_node); | |
2250 | list_splice_init(&sw_desc->group_list, | |
2251 | &old_chain_tail->chain_node); | |
2252 | /* fix up the hardware chain */ | |
2253 | ppc440spe_desc_set_link(chan, old_chain_tail, group_start); | |
2254 | } | |
2255 | ||
2256 | /* increment the pending count by the number of operations */ | |
2257 | chan->pending += slot_cnt / slots_per_op; | |
2258 | ppc440spe_adma_check_threshold(chan); | |
2259 | spin_unlock_bh(&chan->lock); | |
2260 | ||
2261 | dev_dbg(chan->device->common.dev, | |
2262 | "ppc440spe adma%d: %s cookie: %d slot: %d tx %p\n", | |
2263 | chan->device->id, __func__, | |
2264 | sw_desc->async_tx.cookie, sw_desc->idx, sw_desc); | |
2265 | ||
2266 | return cookie; | |
2267 | } | |
2268 | ||
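/*
 * Typical client flow (sketch; my_callback and ctx are hypothetical):
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						   DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		tx->callback = my_callback;
 *		tx->callback_param = ctx;
 *		cookie = tx->tx_submit(tx);
 *	}
 *
 * tx_submit() lands in ppc440spe_adma_tx_submit() above; it only queues
 * the CDB group, which is appended to the hardware chain once 'pending'
 * crosses PPC440SPE_ADMA_THRESHOLD or when the client issues pending
 * transactions.
 */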
2269 | /** | |
2270 | * ppc440spe_adma_prep_dma_interrupt - prepare CDB for a pseudo DMA operation | |
2271 | */ | |
2272 | static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_interrupt( | |
2273 | struct dma_chan *chan, unsigned long flags) | |
2274 | { | |
2275 | struct ppc440spe_adma_chan *ppc440spe_chan; | |
2276 | struct ppc440spe_adma_desc_slot *sw_desc, *group_start; | |
2277 | int slot_cnt, slots_per_op; | |
2278 | ||
2279 | ppc440spe_chan = to_ppc440spe_adma_chan(chan); | |
2280 | ||
2281 | dev_dbg(ppc440spe_chan->device->common.dev, | |
2282 | "ppc440spe adma%d: %s\n", ppc440spe_chan->device->id, | |
2283 | __func__); | |
2284 | ||
2285 | spin_lock_bh(&ppc440spe_chan->lock); | |
2286 | slot_cnt = slots_per_op = 1; | |
2287 | sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, | |
2288 | slots_per_op); | |
2289 | if (sw_desc) { | |
2290 | group_start = sw_desc->group_head; | |
2291 | ppc440spe_desc_init_interrupt(group_start, ppc440spe_chan); | |
2292 | group_start->unmap_len = 0; | |
2293 | sw_desc->async_tx.flags = flags; | |
2294 | } | |
2295 | spin_unlock_bh(&ppc440spe_chan->lock); | |
2296 | ||
2297 | return sw_desc ? &sw_desc->async_tx : NULL; | |
2298 | } | |
2299 | ||
2300 | /** | |
2301 | * ppc440spe_adma_prep_dma_memcpy - prepare CDB for a MEMCPY operation | |
2302 | */ | |
2303 | static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memcpy( | |
2304 | struct dma_chan *chan, dma_addr_t dma_dest, | |
2305 | dma_addr_t dma_src, size_t len, unsigned long flags) | |
2306 | { | |
2307 | struct ppc440spe_adma_chan *ppc440spe_chan; | |
2308 | struct ppc440spe_adma_desc_slot *sw_desc, *group_start; | |
2309 | int slot_cnt, slots_per_op; | |
2310 | ||
2311 | ppc440spe_chan = to_ppc440spe_adma_chan(chan); | |
2312 | ||
2313 | if (unlikely(!len)) | |
2314 | return NULL; | |
2315 | ||
2316 | BUG_ON(unlikely(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT)); | |
2317 | ||
2318 | spin_lock_bh(&ppc440spe_chan->lock); | |
2319 | ||
2320 | dev_dbg(ppc440spe_chan->device->common.dev, | |
2321 | "ppc440spe adma%d: %s len: %u int_en %d\n", | |
2322 | ppc440spe_chan->device->id, __func__, len, | |
2323 | flags & DMA_PREP_INTERRUPT ? 1 : 0); | |
2324 | slot_cnt = slots_per_op = 1; | |
2325 | sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, | |
2326 | slots_per_op); | |
2327 | if (sw_desc) { | |
2328 | group_start = sw_desc->group_head; | |
2329 | ppc440spe_desc_init_memcpy(group_start, flags); | |
2330 | ppc440spe_adma_set_dest(group_start, dma_dest, 0); | |
2331 | ppc440spe_adma_memcpy_xor_set_src(group_start, dma_src, 0); | |
2332 | ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len); | |
2333 | sw_desc->unmap_len = len; | |
2334 | sw_desc->async_tx.flags = flags; | |
2335 | } | |
2336 | spin_unlock_bh(&ppc440spe_chan->lock); | |
2337 | ||
2338 | return sw_desc ? &sw_desc->async_tx : NULL; | |
2339 | } | |
2340 | ||
2341 | /** | |
2342 | * ppc440spe_adma_prep_dma_memset - prepare CDB for a MEMSET operation | |
2343 | */ | |
2344 | static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memset( | |
2345 | struct dma_chan *chan, dma_addr_t dma_dest, int value, | |
2346 | size_t len, unsigned long flags) | |
2347 | { | |
2348 | struct ppc440spe_adma_chan *ppc440spe_chan; | |
2349 | struct ppc440spe_adma_desc_slot *sw_desc, *group_start; | |
2350 | int slot_cnt, slots_per_op; | |
2351 | ||
2352 | ppc440spe_chan = to_ppc440spe_adma_chan(chan); | |
2353 | ||
2354 | if (unlikely(!len)) | |
2355 | return NULL; | |
2356 | ||
2357 | BUG_ON(unlikely(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT)); | |
2358 | ||
2359 | spin_lock_bh(&ppc440spe_chan->lock); | |
2360 | ||
2361 | dev_dbg(ppc440spe_chan->device->common.dev, | |
2362 | "ppc440spe adma%d: %s cal: %u len: %u int_en %d\n", | |
2363 | ppc440spe_chan->device->id, __func__, value, len, | |
2364 | flags & DMA_PREP_INTERRUPT ? 1 : 0); | |
2365 | ||
2366 | slot_cnt = slots_per_op = 1; | |
2367 | sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, | |
2368 | slots_per_op); | |
2369 | if (sw_desc) { | |
2370 | group_start = sw_desc->group_head; | |
2371 | ppc440spe_desc_init_memset(group_start, value, flags); | |
2372 | ppc440spe_adma_set_dest(group_start, dma_dest, 0); | |
2373 | ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len); | |
2374 | sw_desc->unmap_len = len; | |
2375 | sw_desc->async_tx.flags = flags; | |
2376 | } | |
2377 | spin_unlock_bh(&ppc440spe_chan->lock); | |
2378 | ||
2379 | return sw_desc ? &sw_desc->async_tx : NULL; | |
2380 | } | |
2381 | ||
2382 | /** | |
2383 | * ppc440spe_adma_prep_dma_xor - prepare CDB for a XOR operation | |
2384 | */ | |
2385 | static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor( | |
2386 | struct dma_chan *chan, dma_addr_t dma_dest, | |
2387 | dma_addr_t *dma_src, u32 src_cnt, size_t len, | |
2388 | unsigned long flags) | |
2389 | { | |
2390 | struct ppc440spe_adma_chan *ppc440spe_chan; | |
2391 | struct ppc440spe_adma_desc_slot *sw_desc, *group_start; | |
2392 | int slot_cnt, slots_per_op; | |
2393 | ||
2394 | ppc440spe_chan = to_ppc440spe_adma_chan(chan); | |
2395 | ||
2396 | ADMA_LL_DBG(prep_dma_xor_dbg(ppc440spe_chan->device->id, | |
2397 | dma_dest, dma_src, src_cnt)); | |
2398 | if (unlikely(!len)) | |
2399 | return NULL; | |
2400 | BUG_ON(unlikely(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT)); | |
2401 | ||
2402 | dev_dbg(ppc440spe_chan->device->common.dev, | |
2403 | "ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n", | |
2404 | ppc440spe_chan->device->id, __func__, src_cnt, len, | |
2405 | flags & DMA_PREP_INTERRUPT ? 1 : 0); | |
2406 | ||
2407 | spin_lock_bh(&ppc440spe_chan->lock); | |
2408 | slot_cnt = ppc440spe_chan_xor_slot_count(len, src_cnt, &slots_per_op); | |
2409 | sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, | |
2410 | slots_per_op); | |
2411 | if (sw_desc) { | |
2412 | group_start = sw_desc->group_head; | |
2413 | ppc440spe_desc_init_xor(group_start, src_cnt, flags); | |
2414 | ppc440spe_adma_set_dest(group_start, dma_dest, 0); | |
2415 | while (src_cnt--) | |
2416 | ppc440spe_adma_memcpy_xor_set_src(group_start, | |
2417 | dma_src[src_cnt], src_cnt); | |
2418 | ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len); | |
2419 | sw_desc->unmap_len = len; | |
2420 | sw_desc->async_tx.flags = flags; | |
2421 | } | |
2422 | spin_unlock_bh(&ppc440spe_chan->lock); | |
2423 | ||
2424 | return sw_desc ? &sw_desc->async_tx : NULL; | |
2425 | } | |
2426 | ||
2427 | static inline void | |
2428 | ppc440spe_desc_set_xor_src_cnt(struct ppc440spe_adma_desc_slot *desc, | |
2429 | int src_cnt); | |
2430 | static void ppc440spe_init_rxor_cursor(struct ppc440spe_rxor *cursor); | |
2431 | ||
2432 | /** | |
2433 |  * ppc440spe_adma_init_dma2rxor_slot - initialize the CDB for a DMA2 RXOR slot | |
2434 | */ | |
2435 | static void ppc440spe_adma_init_dma2rxor_slot( | |
2436 | struct ppc440spe_adma_desc_slot *desc, | |
2437 | dma_addr_t *src, int src_cnt) | |
2438 | { | |
2439 | int i; | |
2440 | ||
2441 | /* initialize CDB */ | |
2442 | for (i = 0; i < src_cnt; i++) { | |
2443 | ppc440spe_adma_dma2rxor_prep_src(desc, &desc->rxor_cursor, i, | |
2444 | desc->src_cnt, (u32)src[i]); | |
2445 | } | |
2446 | } | |
2447 | ||
2448 | /** | |
2449 |  * ppc440spe_dma01_prep_mult - prepare a Q operation where the | |
2450 |  * destination is also the source | |
2451 | */ | |
2452 | static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_mult( | |
2453 | struct ppc440spe_adma_chan *ppc440spe_chan, | |
2454 | dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt, | |
2455 | const unsigned char *scf, size_t len, unsigned long flags) | |
2456 | { | |
2457 | struct ppc440spe_adma_desc_slot *sw_desc = NULL; | |
2458 | unsigned long op = 0; | |
2459 | int slot_cnt; | |
2460 | ||
2461 | set_bit(PPC440SPE_DESC_WXOR, &op); | |
2462 | slot_cnt = 2; | |
2463 | ||
2464 | spin_lock_bh(&ppc440spe_chan->lock); | |
2465 | ||
2466 | /* use WXOR, each descriptor occupies one slot */ | |
2467 | sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1); | |
2468 | if (sw_desc) { | |
2469 | struct ppc440spe_adma_chan *chan; | |
2470 | struct ppc440spe_adma_desc_slot *iter; | |
2471 | struct dma_cdb *hw_desc; | |
2472 | ||
2473 | chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan); | |
2474 | set_bits(op, &sw_desc->flags); | |
2475 | sw_desc->src_cnt = src_cnt; | |
2476 | sw_desc->dst_cnt = dst_cnt; | |
2477 | /* First descriptor, zero data in the destination and copy it | |
2478 | * to q page using MULTICAST transfer. | |
2479 | */ | |
2480 | iter = list_first_entry(&sw_desc->group_list, | |
2481 | struct ppc440spe_adma_desc_slot, | |
2482 | chain_node); | |
2483 | memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); | |
2484 | /* set 'next' pointer */ | |
2485 | iter->hw_next = list_entry(iter->chain_node.next, | |
2486 | struct ppc440spe_adma_desc_slot, | |
2487 | chain_node); | |
2488 | clear_bit(PPC440SPE_DESC_INT, &iter->flags); | |
2489 | hw_desc = iter->hw_desc; | |
2490 | hw_desc->opc = DMA_CDB_OPC_MULTICAST; | |
2491 | ||
2492 | ppc440spe_desc_set_dest_addr(iter, chan, | |
2493 | DMA_CUED_XOR_BASE, dst[0], 0); | |
2494 | ppc440spe_desc_set_dest_addr(iter, chan, 0, dst[1], 1); | |
2495 | ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB, | |
2496 | src[0]); | |
2497 | ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len); | |
2498 | iter->unmap_len = len; | |
2499 | ||
2500 | /* | |
2501 | * Second descriptor, multiply data from the q page | |
2502 | * and store the result in real destination. | |
2503 | */ | |
2504 | iter = list_first_entry(&iter->chain_node, | |
2505 | struct ppc440spe_adma_desc_slot, | |
2506 | chain_node); | |
2507 | memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); | |
2508 | iter->hw_next = NULL; | |
2509 | if (flags & DMA_PREP_INTERRUPT) | |
2510 | set_bit(PPC440SPE_DESC_INT, &iter->flags); | |
2511 | else | |
2512 | clear_bit(PPC440SPE_DESC_INT, &iter->flags); | |
2513 | ||
2514 | hw_desc = iter->hw_desc; | |
2515 | hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; | |
2516 | ppc440spe_desc_set_src_addr(iter, chan, 0, | |
2517 | DMA_CUED_XOR_HB, dst[1]); | |
2518 | ppc440spe_desc_set_dest_addr(iter, chan, | |
2519 | DMA_CUED_XOR_BASE, dst[0], 0); | |
2520 | ||
2521 | ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF, | |
2522 | DMA_CDB_SG_DST1, scf[0]); | |
2523 | ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len); | |
2524 | iter->unmap_len = len; | |
2525 | sw_desc->async_tx.flags = flags; | |
2526 | } | |
2527 | ||
2528 | spin_unlock_bh(&ppc440spe_chan->lock); | |
2529 | ||
2530 | return sw_desc; | |
2531 | } | |
2532 | ||
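/*
 * Summary of the two-CDB chain built above (as described by the
 * per-step comments): CDB1 is a MULTICAST that fans the source out to
 * both the real destination and the scratch q page; CDB2 is an
 * MV_SG1_SG2 that reads the q-page copy back, applies the GF multiplier
 * scf[0], and stores the result in the real destination.
 */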
2533 | /** | |
2534 | * ppc440spe_dma01_prep_sum_product - | |
2535 | * Dx = A*(P+Pxy) + B*(Q+Qxy) operation where destination is also | |
2536 | * the source. | |
2537 | */ | |
2538 | static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_sum_product( | |
2539 | struct ppc440spe_adma_chan *ppc440spe_chan, | |
2540 | dma_addr_t *dst, dma_addr_t *src, int src_cnt, | |
2541 | const unsigned char *scf, size_t len, unsigned long flags) | |
2542 | { | |
2543 | struct ppc440spe_adma_desc_slot *sw_desc = NULL; | |
2544 | unsigned long op = 0; | |
2545 | int slot_cnt; | |
2546 | ||
2547 | set_bit(PPC440SPE_DESC_WXOR, &op); | |
2548 | slot_cnt = 3; | |
2549 | ||
2550 | spin_lock_bh(&ppc440spe_chan->lock); | |
2551 | ||
2552 | /* WXOR, each descriptor occupies one slot */ | |
2553 | sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1); | |
2554 | if (sw_desc) { | |
2555 | struct ppc440spe_adma_chan *chan; | |
2556 | struct ppc440spe_adma_desc_slot *iter; | |
2557 | struct dma_cdb *hw_desc; | |
2558 | ||
2559 | chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan); | |
2560 | set_bits(op, &sw_desc->flags); | |
2561 | sw_desc->src_cnt = src_cnt; | |
2562 | sw_desc->dst_cnt = 1; | |
2563 | /* 1st descriptor, src[1] data to q page and zero destination */ | |
2564 | iter = list_first_entry(&sw_desc->group_list, | |
2565 | struct ppc440spe_adma_desc_slot, | |
2566 | chain_node); | |
2567 | memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); | |
2568 | iter->hw_next = list_entry(iter->chain_node.next, | |
2569 | struct ppc440spe_adma_desc_slot, | |
2570 | chain_node); | |
2571 | clear_bit(PPC440SPE_DESC_INT, &iter->flags); | |
2572 | hw_desc = iter->hw_desc; | |
2573 | hw_desc->opc = DMA_CDB_OPC_MULTICAST; | |
2574 | ||
2575 | ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE, | |
2576 | *dst, 0); | |
2577 | ppc440spe_desc_set_dest_addr(iter, chan, 0, | |
2578 | ppc440spe_chan->qdest, 1); | |
2579 | ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB, | |
2580 | src[1]); | |
2581 | ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len); | |
2582 | iter->unmap_len = len; | |
2583 | ||
2584 | /* 2nd descriptor, multiply src[1] data and store the | |
2585 | * result in destination */ | |
2586 | iter = list_first_entry(&iter->chain_node, | |
2587 | struct ppc440spe_adma_desc_slot, | |
2588 | chain_node); | |
2589 | memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); | |
2590 | /* set 'next' pointer */ | |
2591 | iter->hw_next = list_entry(iter->chain_node.next, | |
2592 | struct ppc440spe_adma_desc_slot, | |
2593 | chain_node); | |
2594 | if (flags & DMA_PREP_INTERRUPT) | |
2595 | set_bit(PPC440SPE_DESC_INT, &iter->flags); | |
2596 | else | |
2597 | clear_bit(PPC440SPE_DESC_INT, &iter->flags); | |
2598 | ||
2599 | hw_desc = iter->hw_desc; | |
2600 | hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; | |
2601 | ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB, | |
2602 | ppc440spe_chan->qdest); | |
2603 | ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE, | |
2604 | *dst, 0); | |
2605 | ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF, | |
2606 | DMA_CDB_SG_DST1, scf[1]); | |
2607 | ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len); | |
2608 | iter->unmap_len = len; | |
2609 | ||
2610 | /* | |
2611 | * 3rd descriptor, multiply src[0] data and xor it | |
2612 | * with destination | |
2613 | */ | |
2614 | iter = list_first_entry(&iter->chain_node, | |
2615 | struct ppc440spe_adma_desc_slot, | |
2616 | chain_node); | |
2617 | memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); | |
2618 | iter->hw_next = NULL; | |
2619 | if (flags & DMA_PREP_INTERRUPT) | |
2620 | set_bit(PPC440SPE_DESC_INT, &iter->flags); | |
2621 | else | |
2622 | clear_bit(PPC440SPE_DESC_INT, &iter->flags); | |
2623 | ||
2624 | hw_desc = iter->hw_desc; | |
2625 | hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; | |
2626 | ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB, | |
2627 | src[0]); | |
2628 | ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE, | |
2629 | *dst, 0); | |
2630 | ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF, | |
2631 | DMA_CDB_SG_DST1, scf[0]); | |
2632 | ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len); | |
2633 | iter->unmap_len = len; | |
2634 | sw_desc->async_tx.flags = flags; | |
2635 | } | |
2636 | ||
2637 | spin_unlock_bh(&ppc440spe_chan->lock); | |
2638 | ||
2639 | return sw_desc; | |
2640 | } | |
2641 | ||
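/*
 * Summary of the sum_product chain above (as described by the per-step
 * comments): CDB1 multicasts src[1] to the destination and the scratch
 * q page; CDB2 multiplies the q-page copy by scf[1] into the
 * destination; CDB3 multiplies src[0] by scf[0] and GF-XORs the product
 * with the destination, i.e. D = scf[0]*src[0] + scf[1]*src[1] over
 * GF(2^8).
 */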
2642 | static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_pq( | |
2643 | struct ppc440spe_adma_chan *ppc440spe_chan, | |
2644 | dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt, | |
2645 | const unsigned char *scf, size_t len, unsigned long flags) | |
2646 | { | |
2647 | int slot_cnt; | |
2648 | struct ppc440spe_adma_desc_slot *sw_desc = NULL, *iter; | |
2649 | unsigned long op = 0; | |
2650 | unsigned char mult = 1; | |
2651 | ||
2652 | 	pr_debug("%s: dst_cnt %d, src_cnt %d, len %zu\n", | |
2653 | __func__, dst_cnt, src_cnt, len); | |
2654 | 	/* select WXOR or RXOR operations depending on the | |
2655 | 	 * source addresses of the operands and the number | |
2656 | 	 * of destinations (RXOR supports only Q-parity calculations) | |
2657 | */ | |
2658 | set_bit(PPC440SPE_DESC_WXOR, &op); | |
2659 | if (!test_and_set_bit(PPC440SPE_RXOR_RUN, &ppc440spe_rxor_state)) { | |
2660 | /* no active RXOR; | |
2661 | * do RXOR if: | |
2662 | 		 * - there is more than one source, | |
2663 | 		 * - len is aligned on a 512-byte boundary, | |
2664 | 		 * - the source addresses fit one of the 4 possible regions. | |
2665 | */ | |
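		/* Illustrative layouts of len-sized blocks at address A:
		 *   A, A+len, A+2*len  ->  RXOR R1 R2 R3
		 *   A, A+len, A+3*len  ->  RXOR R1 R2 R4
		 *   A, A+len, A+4*len  ->  RXOR R1 R2 R5
		 *   A, A+len           ->  RXOR R1 R2
		 */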
2666 | if (src_cnt > 1 && | |
2667 | !(len & MQ0_CF2H_RXOR_BS_MASK) && | |
2668 | (src[0] + len) == src[1]) { | |
2669 | /* may do RXOR R1 R2 */ | |
2670 | set_bit(PPC440SPE_DESC_RXOR, &op); | |
2671 | if (src_cnt != 2) { | |
2672 | 				/* may try to extend the RXOR region */ | |
2673 | if ((src[1] + len) == src[2]) { | |
2674 | /* do RXOR R1 R2 R3 */ | |
2675 | set_bit(PPC440SPE_DESC_RXOR123, | |
2676 | &op); | |
2677 | } else if ((src[1] + len * 2) == src[2]) { | |
2678 | /* do RXOR R1 R2 R4 */ | |
2679 | set_bit(PPC440SPE_DESC_RXOR124, &op); | |
2680 | } else if ((src[1] + len * 3) == src[2]) { | |
2681 | /* do RXOR R1 R2 R5 */ | |
2682 | set_bit(PPC440SPE_DESC_RXOR125, | |
2683 | &op); | |
2684 | } else { | |
2685 | /* do RXOR R1 R2 */ | |
2686 | set_bit(PPC440SPE_DESC_RXOR12, | |
2687 | &op); | |
2688 | } | |
2689 | } else { | |
2690 | /* do RXOR R1 R2 */ | |
2691 | set_bit(PPC440SPE_DESC_RXOR12, &op); | |
2692 | } | |
2693 | } | |
2694 | ||
2695 | if (!test_bit(PPC440SPE_DESC_RXOR, &op)) { | |
2696 | /* can not do this operation with RXOR */ | |
2697 | clear_bit(PPC440SPE_RXOR_RUN, | |
2698 | &ppc440spe_rxor_state); | |
2699 | } else { | |
2700 | /* can do; set block size right now */ | |
2701 | ppc440spe_desc_set_rxor_block_size(len); | |
2702 | } | |
2703 | } | |
2704 | ||
2705 | /* Number of necessary slots depends on operation type selected */ | |
2706 | if (!test_bit(PPC440SPE_DESC_RXOR, &op)) { | |
2707 | /* This is a WXOR only chain. Need descriptors for each | |
2708 | * source to GF-XOR them with WXOR, and need descriptors | |
2709 | * for each destination to zero them with WXOR | |
2710 | */ | |
2711 | slot_cnt = src_cnt; | |
2712 | ||
2713 | if (flags & DMA_PREP_ZERO_P) { | |
2714 | slot_cnt++; | |
2715 | set_bit(PPC440SPE_ZERO_P, &op); | |
2716 | } | |
2717 | if (flags & DMA_PREP_ZERO_Q) { | |
2718 | slot_cnt++; | |
2719 | set_bit(PPC440SPE_ZERO_Q, &op); | |
2720 | } | |
2721 | } else { | |
2722 | 		/* Need 1 or 2 descriptors for the RXOR operation, and | |
2723 | 		 * (src_cnt - (2 or 3)) descriptors for WXOR of the | |
2724 | 		 * remaining sources (if any) | |
2725 | */ | |
2726 | slot_cnt = dst_cnt; | |
2727 | ||
2728 | if (flags & DMA_PREP_ZERO_P) | |
2729 | set_bit(PPC440SPE_ZERO_P, &op); | |
2730 | if (flags & DMA_PREP_ZERO_Q) | |
2731 | set_bit(PPC440SPE_ZERO_Q, &op); | |
2732 | ||
2733 | if (test_bit(PPC440SPE_DESC_RXOR12, &op)) | |
2734 | slot_cnt += src_cnt - 2; | |
2735 | else | |
2736 | slot_cnt += src_cnt - 3; | |
2737 | ||
2738 | 		/* Thus we have either an RXOR-only chain or | |
2739 | 		 * a mixed RXOR/WXOR one | |
2740 | */ | |
2741 | if (slot_cnt == dst_cnt) | |
2742 | /* RXOR only chain */ | |
2743 | clear_bit(PPC440SPE_DESC_WXOR, &op); | |
2744 | } | |
2745 | ||
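	/* Worked example (illustrative): src_cnt = 6, dst_cnt = 2 and the
	 * first three sources suitable for RXOR R1 R2 R3 give
	 * slot_cnt = 2 + (6 - 3) = 5, i.e. a mixed RXOR/WXOR chain with
	 * three WXOR descriptors for the remaining sources.
	 */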
2746 | spin_lock_bh(&ppc440spe_chan->lock); | |
2747 | /* for both RXOR/WXOR each descriptor occupies one slot */ | |
2748 | sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1); | |
2749 | if (sw_desc) { | |
2750 | ppc440spe_desc_init_dma01pq(sw_desc, dst_cnt, src_cnt, | |
2751 | flags, op); | |
2752 | ||
2753 | /* setup dst/src/mult */ | |
2754 | pr_debug("%s: set dst descriptor 0, 1: 0x%016llx, 0x%016llx\n", | |
2755 | __func__, dst[0], dst[1]); | |
2756 | ppc440spe_adma_pq_set_dest(sw_desc, dst, flags); | |
2757 | while (src_cnt--) { | |
2758 | ppc440spe_adma_pq_set_src(sw_desc, src[src_cnt], | |
2759 | src_cnt); | |
2760 | ||
2761 | 			/* NOTE: "Multi = 0 is equivalent to = 1", as | |
2762 | 			 * stated in 440SPSPe_RAID6_Addendum_UM_1_17.pdf, | |
2763 | 			 * doesn't work for RXOR with DMA0/1! Instead, multi=0 | |
2764 | 			 * leads to zeroing the source data after RXOR. | |
2765 | 			 * So, for the P case, set mult=1 explicitly. | |
2766 | */ | |
2767 | if (!(flags & DMA_PREP_PQ_DISABLE_Q)) | |
2768 | mult = scf[src_cnt]; | |
2769 | ppc440spe_adma_pq_set_src_mult(sw_desc, | |
2770 | mult, src_cnt, dst_cnt - 1); | |
2771 | } | |
2772 | ||
2773 | 		/* Set up the byte count for each slot just allocated */ | |
2774 | sw_desc->async_tx.flags = flags; | |
2775 | list_for_each_entry(iter, &sw_desc->group_list, | |
2776 | chain_node) { | |
2777 | ppc440spe_desc_set_byte_count(iter, | |
2778 | ppc440spe_chan, len); | |
2779 | iter->unmap_len = len; | |
2780 | } | |
2781 | } | |
2782 | spin_unlock_bh(&ppc440spe_chan->lock); | |
2783 | ||
2784 | return sw_desc; | |
2785 | } | |
2786 | ||
2787 | static struct ppc440spe_adma_desc_slot *ppc440spe_dma2_prep_pq( | |
2788 | struct ppc440spe_adma_chan *ppc440spe_chan, | |
2789 | dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt, | |
2790 | const unsigned char *scf, size_t len, unsigned long flags) | |
2791 | { | |
2792 | int slot_cnt, descs_per_op; | |
2793 | struct ppc440spe_adma_desc_slot *sw_desc = NULL, *iter; | |
2794 | unsigned long op = 0; | |
2795 | unsigned char mult = 1; | |
2796 | ||
2797 | BUG_ON(!dst_cnt); | |
2798 | /*pr_debug("%s: dst_cnt %d, src_cnt %d, len %d\n", | |
2799 | __func__, dst_cnt, src_cnt, len);*/ | |
2800 | ||
2801 | spin_lock_bh(&ppc440spe_chan->lock); | |
2802 | descs_per_op = ppc440spe_dma2_pq_slot_count(src, src_cnt, len); | |
2803 | if (descs_per_op < 0) { | |
2804 | spin_unlock_bh(&ppc440spe_chan->lock); | |
2805 | return NULL; | |
2806 | } | |
2807 | ||
2808 | 	/* depending on the number of sources we have 1 or 2 RXOR chains */ | |
2809 | slot_cnt = descs_per_op * dst_cnt; | |
2810 | ||
2811 | sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1); | |
2812 | if (sw_desc) { | |
2813 | op = slot_cnt; | |
2814 | sw_desc->async_tx.flags = flags; | |
2815 | list_for_each_entry(iter, &sw_desc->group_list, chain_node) { | |
2816 | ppc440spe_desc_init_dma2pq(iter, dst_cnt, src_cnt, | |
2817 | --op ? 0 : flags); | |
2818 | ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, | |
2819 | len); | |
2820 | iter->unmap_len = len; | |
2821 | ||
2822 | ppc440spe_init_rxor_cursor(&(iter->rxor_cursor)); | |
2823 | iter->rxor_cursor.len = len; | |
2824 | iter->descs_per_op = descs_per_op; | |
2825 | } | |
2826 | op = 0; | |
2827 | list_for_each_entry(iter, &sw_desc->group_list, chain_node) { | |
2828 | op++; | |
2829 | if (op % descs_per_op == 0) | |
2830 | ppc440spe_adma_init_dma2rxor_slot(iter, src, | |
2831 | src_cnt); | |
2832 | if (likely(!list_is_last(&iter->chain_node, | |
2833 | &sw_desc->group_list))) { | |
2834 | /* set 'next' pointer */ | |
2835 | iter->hw_next = | |
2836 | list_entry(iter->chain_node.next, | |
2837 | struct ppc440spe_adma_desc_slot, | |
2838 | chain_node); | |
2839 | ppc440spe_xor_set_link(iter, iter->hw_next); | |
2840 | } else { | |
2841 | /* this is the last descriptor. */ | |
2842 | iter->hw_next = NULL; | |
2843 | } | |
2844 | } | |
2845 | ||
2846 | /* fixup head descriptor */ | |
2847 | sw_desc->dst_cnt = dst_cnt; | |
2848 | if (flags & DMA_PREP_ZERO_P) | |
2849 | set_bit(PPC440SPE_ZERO_P, &sw_desc->flags); | |
2850 | if (flags & DMA_PREP_ZERO_Q) | |
2851 | set_bit(PPC440SPE_ZERO_Q, &sw_desc->flags); | |
2852 | ||
2853 | /* setup dst/src/mult */ | |
2854 | ppc440spe_adma_pq_set_dest(sw_desc, dst, flags); | |
2855 | ||
2856 | while (src_cnt--) { | |
2857 | /* handle descriptors (if dst_cnt == 2) inside | |
2858 | * the ppc440spe_adma_pq_set_srcxxx() functions | |
2859 | */ | |
2860 | ppc440spe_adma_pq_set_src(sw_desc, src[src_cnt], | |
2861 | src_cnt); | |
2862 | if (!(flags & DMA_PREP_PQ_DISABLE_Q)) | |
2863 | mult = scf[src_cnt]; | |
2864 | ppc440spe_adma_pq_set_src_mult(sw_desc, | |
2865 | mult, src_cnt, dst_cnt - 1); | |
2866 | } | |
2867 | } | |
2868 | spin_unlock_bh(&ppc440spe_chan->lock); | |
2869 | ppc440spe_desc_set_rxor_block_size(len); | |
2870 | return sw_desc; | |
2871 | } | |
2872 | ||
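/*
 * Note on the DMA2 sizing above (derived from the code): descs_per_op
 * is the number of CDBs one RXOR pass over 'src_cnt' sources requires,
 * and each destination (P and/or Q) gets its own chain, hence
 * slot_cnt = descs_per_op * dst_cnt, i.e. one or two RXOR chains.
 */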
2873 | /** | |
2874 | * ppc440spe_adma_prep_dma_pq - prepare CDB (group) for a GF-XOR operation | |
2875 | */ | |
2876 | static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pq( | |
2877 | struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, | |
2878 | unsigned int src_cnt, const unsigned char *scf, | |
2879 | size_t len, unsigned long flags) | |
2880 | { | |
2881 | struct ppc440spe_adma_chan *ppc440spe_chan; | |
2882 | struct ppc440spe_adma_desc_slot *sw_desc = NULL; | |
2883 | int dst_cnt = 0; | |
2884 | ||
2885 | ppc440spe_chan = to_ppc440spe_adma_chan(chan); | |
2886 | ||
2887 | ADMA_LL_DBG(prep_dma_pq_dbg(ppc440spe_chan->device->id, | |
2888 | dst, src, src_cnt)); | |
2889 | BUG_ON(!len); | |
2890 | BUG_ON(unlikely(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT)); | |
2891 | BUG_ON(!src_cnt); | |
2892 | ||
2893 | if (src_cnt == 1 && dst[1] == src[0]) { | |
2894 | dma_addr_t dest[2]; | |
2895 | ||
2896 | /* dst[1] is real destination (Q) */ | |
2897 | dest[0] = dst[1]; | |
2898 | /* this is the page to multicast source data to */ | |
2899 | dest[1] = ppc440spe_chan->qdest; | |
2900 | sw_desc = ppc440spe_dma01_prep_mult(ppc440spe_chan, | |
2901 | dest, 2, src, src_cnt, scf, len, flags); | |
2902 | return sw_desc ? &sw_desc->async_tx : NULL; | |
2903 | } | |
2904 | ||
2905 | if (src_cnt == 2 && dst[1] == src[1]) { | |
2906 | sw_desc = ppc440spe_dma01_prep_sum_product(ppc440spe_chan, | |
2907 | &dst[1], src, 2, scf, len, flags); | |
2908 | return sw_desc ? &sw_desc->async_tx : NULL; | |
2909 | } | |
2910 | ||
2911 | if (!(flags & DMA_PREP_PQ_DISABLE_P)) { | |
2912 | BUG_ON(!dst[0]); | |
2913 | dst_cnt++; | |
2914 | flags |= DMA_PREP_ZERO_P; | |
2915 | } | |
2916 | ||
2917 | if (!(flags & DMA_PREP_PQ_DISABLE_Q)) { | |
2918 | BUG_ON(!dst[1]); | |
2919 | dst_cnt++; | |
2920 | flags |= DMA_PREP_ZERO_Q; | |
2921 | } | |
2922 | ||
2923 | BUG_ON(!dst_cnt); | |
2924 | ||
2925 | dev_dbg(ppc440spe_chan->device->common.dev, | |
2926 | "ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n", | |
2927 | ppc440spe_chan->device->id, __func__, src_cnt, len, | |
2928 | flags & DMA_PREP_INTERRUPT ? 1 : 0); | |
2929 | ||
2930 | switch (ppc440spe_chan->device->id) { | |
2931 | case PPC440SPE_DMA0_ID: | |
2932 | case PPC440SPE_DMA1_ID: | |
2933 | sw_desc = ppc440spe_dma01_prep_pq(ppc440spe_chan, | |
2934 | dst, dst_cnt, src, src_cnt, scf, | |
2935 | len, flags); | |
2936 | break; | |
2937 | ||
2938 | case PPC440SPE_XOR_ID: | |
2939 | sw_desc = ppc440spe_dma2_prep_pq(ppc440spe_chan, | |
2940 | dst, dst_cnt, src, src_cnt, scf, | |
2941 | len, flags); | |
2942 | break; | |
2943 | } | |
2944 | ||
2945 | return sw_desc ? &sw_desc->async_tx : NULL; | |
2946 | } | |
2947 | ||
2948 | /** | |
2949 | * ppc440spe_adma_prep_dma_pqzero_sum - prepare CDB group for | |
2950 | * a PQ_ZERO_SUM operation | |
2951 | */ | |
2952 | static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pqzero_sum( | |
2953 | struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, | |
2954 | unsigned int src_cnt, const unsigned char *scf, size_t len, | |
2955 | enum sum_check_flags *pqres, unsigned long flags) | |
2956 | { | |
2957 | struct ppc440spe_adma_chan *ppc440spe_chan; | |
2958 | struct ppc440spe_adma_desc_slot *sw_desc, *iter; | |
2959 | dma_addr_t pdest, qdest; | |
2960 | int slot_cnt, slots_per_op, idst, dst_cnt; | |
2961 | ||
2962 | ppc440spe_chan = to_ppc440spe_adma_chan(chan); | |
2963 | ||
2964 | if (flags & DMA_PREP_PQ_DISABLE_P) | |
2965 | pdest = 0; | |
2966 | else | |
2967 | pdest = pq[0]; | |
2968 | ||
2969 | if (flags & DMA_PREP_PQ_DISABLE_Q) | |
2970 | qdest = 0; | |
2971 | else | |
2972 | qdest = pq[1]; | |
2973 | ||
2974 | ADMA_LL_DBG(prep_dma_pqzero_sum_dbg(ppc440spe_chan->device->id, | |
2975 | src, src_cnt, scf)); | |
2976 | ||
2977 | /* Always use WXOR for P/Q calculations (two destinations). | |
2978 | * Need 1 or 2 extra slots to verify results are zero. | |
2979 | */ | |
2980 | idst = dst_cnt = (pdest && qdest) ? 2 : 1; | |
2981 | ||
2982 | /* One additional slot per destination to clone P/Q | |
2983 | * before calculation (we have to preserve destinations). | |
2984 | */ | |
2985 | slot_cnt = src_cnt + dst_cnt * 2; | |
2986 | slots_per_op = 1; | |
2987 | ||
2988 | spin_lock_bh(&ppc440spe_chan->lock); | |
2989 | sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, | |
2990 | slots_per_op); | |
2991 | if (sw_desc) { | |
2992 | ppc440spe_desc_init_dma01pqzero_sum(sw_desc, dst_cnt, src_cnt); | |
2993 | ||
2994 | /* Setup byte count for each slot just allocated */ | |
2995 | sw_desc->async_tx.flags = flags; | |
2996 | list_for_each_entry(iter, &sw_desc->group_list, chain_node) { | |
2997 | ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, | |
2998 | len); | |
2999 | iter->unmap_len = len; | |
3000 | } | |
3001 | ||
3002 | if (pdest) { | |
3003 | struct dma_cdb *hw_desc; | |
3004 | struct ppc440spe_adma_chan *chan; | |
3005 | ||
3006 | iter = sw_desc->group_head; | |
3007 | chan = to_ppc440spe_adma_chan(iter->async_tx.chan); | |
3008 | memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); | |
3009 | iter->hw_next = list_entry(iter->chain_node.next, | |
3010 | struct ppc440spe_adma_desc_slot, | |
3011 | chain_node); | |
3012 | hw_desc = iter->hw_desc; | |
3013 | hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; | |
3014 | iter->src_cnt = 0; | |
3015 | iter->dst_cnt = 0; | |
3016 | ppc440spe_desc_set_dest_addr(iter, chan, 0, | |
3017 | ppc440spe_chan->pdest, 0); | |
3018 | ppc440spe_desc_set_src_addr(iter, chan, 0, 0, pdest); | |
3019 | ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, | |
3020 | len); | |
3021 | iter->unmap_len = 0; | |
3022 | /* override pdest to preserve original P */ | |
3023 | pdest = ppc440spe_chan->pdest; | |
3024 | } | |
3025 | if (qdest) { | |
3026 | struct dma_cdb *hw_desc; | |
3027 | struct ppc440spe_adma_chan *chan; | |
3028 | ||
3029 | iter = list_first_entry(&sw_desc->group_list, | |
3030 | struct ppc440spe_adma_desc_slot, | |
3031 | chain_node); | |
3032 | chan = to_ppc440spe_adma_chan(iter->async_tx.chan); | |
3033 | ||
3034 | if (pdest) { | |
3035 | iter = list_entry(iter->chain_node.next, | |
3036 | struct ppc440spe_adma_desc_slot, | |
3037 | chain_node); | |
3038 | } | |
3039 | ||
3040 | memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); | |
3041 | iter->hw_next = list_entry(iter->chain_node.next, | |
3042 | struct ppc440spe_adma_desc_slot, | |
3043 | chain_node); | |
3044 | hw_desc = iter->hw_desc; | |
3045 | hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; | |
3046 | iter->src_cnt = 0; | |
3047 | iter->dst_cnt = 0; | |
3048 | ppc440spe_desc_set_dest_addr(iter, chan, 0, | |
3049 | ppc440spe_chan->qdest, 0); | |
3050 | ppc440spe_desc_set_src_addr(iter, chan, 0, 0, qdest); | |
3051 | ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, | |
3052 | len); | |
3053 | iter->unmap_len = 0; | |
3054 | /* override qdest to preserve original Q */ | |
3055 | qdest = ppc440spe_chan->qdest; | |
3056 | } | |
3057 | ||
3058 | /* Setup destinations for P/Q ops */ | |
3059 | ppc440spe_adma_pqzero_sum_set_dest(sw_desc, pdest, qdest); | |
3060 | ||
3061 | /* Setup zero QWORDs into DCHECK CDBs */ | |
3062 | idst = dst_cnt; | |
3063 | list_for_each_entry_reverse(iter, &sw_desc->group_list, | |
3064 | chain_node) { | |
3065 | /* | |
3066 | 			 * The last CDB corresponds to the Q-parity check; | |
3067 | 			 * the one before it corresponds to the | |
3068 | 			 * P-parity check | |
3069 | */ | |
3070 | if (idst == DMA_DEST_MAX_NUM) { | |
3071 | if (idst == dst_cnt) { | |
3072 | set_bit(PPC440SPE_DESC_QCHECK, | |
3073 | &iter->flags); | |
3074 | } else { | |
3075 | set_bit(PPC440SPE_DESC_PCHECK, | |
3076 | &iter->flags); | |
3077 | } | |
3078 | } else { | |
3079 | if (qdest) { | |
3080 | set_bit(PPC440SPE_DESC_QCHECK, | |
3081 | &iter->flags); | |
3082 | } else { | |
3083 | set_bit(PPC440SPE_DESC_PCHECK, | |
3084 | &iter->flags); | |
3085 | } | |
3086 | } | |
3087 | iter->xor_check_result = pqres; | |
3088 | ||
3089 | /* | |
3090 | 			 * set it to zero; if the check fails, the result | |
3091 | 			 * will be updated | |
3092 | */ | |
3093 | *iter->xor_check_result = 0; | |
3094 | ppc440spe_desc_set_dcheck(iter, ppc440spe_chan, | |
3095 | ppc440spe_qword); | |
3096 | ||
3097 | if (!(--dst_cnt)) | |
3098 | break; | |
3099 | } | |
3100 | ||
3101 | /* Setup sources and mults for P/Q ops */ | |
3102 | list_for_each_entry_continue_reverse(iter, &sw_desc->group_list, | |
3103 | chain_node) { | |
3104 | struct ppc440spe_adma_chan *chan; | |
3105 | u32 mult_dst; | |
3106 | ||
3107 | chan = to_ppc440spe_adma_chan(iter->async_tx.chan); | |
3108 | ppc440spe_desc_set_src_addr(iter, chan, 0, | |
3109 | DMA_CUED_XOR_HB, | |
3110 | src[src_cnt - 1]); | |
3111 | if (qdest) { | |
3112 | mult_dst = (dst_cnt - 1) ? DMA_CDB_SG_DST2 : | |
3113 | DMA_CDB_SG_DST1; | |
3114 | ppc440spe_desc_set_src_mult(iter, chan, | |
3115 | DMA_CUED_MULT1_OFF, | |
3116 | mult_dst, | |
3117 | scf[src_cnt - 1]); | |
3118 | } | |
3119 | if (!(--src_cnt)) | |
3120 | break; | |
3121 | } | |
3122 | } | |
3123 | spin_unlock_bh(&ppc440spe_chan->lock); | |
3124 | return sw_desc ? &sw_desc->async_tx : NULL; | |
3125 | } | |
3126 | ||
3127 | /** | |
3128 | * ppc440spe_adma_prep_dma_xor_zero_sum - prepare CDB group for | |
3129 | * XOR ZERO_SUM operation | |
3130 | */ | |
3131 | static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor_zero_sum( | |
3132 | struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, | |
3133 | size_t len, enum sum_check_flags *result, unsigned long flags) | |
3134 | { | |
3135 | struct dma_async_tx_descriptor *tx; | |
3136 | dma_addr_t pq[2]; | |
3137 | ||
3138 | /* validate P, disable Q */ | |
3139 | pq[0] = src[0]; | |
3140 | pq[1] = 0; | |
3141 | flags |= DMA_PREP_PQ_DISABLE_Q; | |
3142 | ||
3143 | tx = ppc440spe_adma_prep_dma_pqzero_sum(chan, pq, &src[1], | |
3144 | src_cnt - 1, 0, len, | |
3145 | result, flags); | |
3146 | return tx; | |
3147 | } | |
3148 | ||
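/*
 * The XOR zero-sum above is a degenerate PQ zero-sum: the first source
 * plays the role of P, Q is disabled, and the engine verifies that
 * src[0] ^ src[1] ^ ... ^ src[src_cnt - 1] is zero.
 */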
3149 | /** | |
3150 | * ppc440spe_adma_set_dest - set destination address into descriptor | |
3151 | */ | |
3152 | static void ppc440spe_adma_set_dest(struct ppc440spe_adma_desc_slot *sw_desc, | |
3153 | dma_addr_t addr, int index) | |
3154 | { | |
3155 | struct ppc440spe_adma_chan *chan; | |
3156 | ||
3157 | BUG_ON(index >= sw_desc->dst_cnt); | |
3158 | ||
3159 | chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan); | |
3160 | ||
3161 | switch (chan->device->id) { | |
3162 | case PPC440SPE_DMA0_ID: | |
3163 | case PPC440SPE_DMA1_ID: | |
3164 | 		/* to do: support transfer lengths > | |
3165 | * PPC440SPE_ADMA_DMA/XOR_MAX_BYTE_COUNT | |
3166 | */ | |
3167 | ppc440spe_desc_set_dest_addr(sw_desc->group_head, | |
3168 | chan, 0, addr, index); | |
3169 | break; | |
3170 | case PPC440SPE_XOR_ID: | |
3171 | sw_desc = ppc440spe_get_group_entry(sw_desc, index); | |
3172 | ppc440spe_desc_set_dest_addr(sw_desc, | |
3173 | chan, 0, addr, index); | |
3174 | break; | |
3175 | } | |
3176 | } | |
3177 | ||
3178 | static void ppc440spe_adma_pq_zero_op(struct ppc440spe_adma_desc_slot *iter, | |
3179 | struct ppc440spe_adma_chan *chan, dma_addr_t addr) | |
3180 | { | |
3181 | /* To clear a destination (P or Q, depending on the | |
3182 | * index), update the descriptor as follows: | |
3183 | * addr is the destination (0 corresponds to SG2): | |
3184 | */ | |
3185 | ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE, addr, 0); | |
3186 | ||
3187 | /* ... and the addr is source: */ | |
3188 | ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB, addr); | |
3189 | ||
3190 | /* addr is always SG2, so the mult is always DST1 */ | |
3191 | ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF, | |
3192 | DMA_CDB_SG_DST1, 1); | |
3193 | } | |
3194 | ||
3195 | /** | |
3196 | * ppc440spe_adma_pq_set_dest - set destination address into descriptor | |
3197 | * for the PQXOR operation | |
3198 | */ | |
3199 | static void ppc440spe_adma_pq_set_dest(struct ppc440spe_adma_desc_slot *sw_desc, | |
3200 | dma_addr_t *addrs, unsigned long flags) | |
3201 | { | |
3202 | struct ppc440spe_adma_desc_slot *iter; | |
3203 | struct ppc440spe_adma_chan *chan; | |
3204 | dma_addr_t paddr, qaddr; | |
3205 | dma_addr_t addr = 0, ppath, qpath; | |
3206 | int index = 0, i; | |
3207 | ||
3208 | chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan); | |
3209 | ||
3210 | if (flags & DMA_PREP_PQ_DISABLE_P) | |
3211 | paddr = 0; | |
3212 | else | |
3213 | paddr = addrs[0]; | |
3214 | ||
3215 | if (flags & DMA_PREP_PQ_DISABLE_Q) | |
3216 | qaddr = 0; | |
3217 | else | |
3218 | qaddr = addrs[1]; | |
3219 | ||
3220 | if (!paddr || !qaddr) | |
3221 | addr = paddr ? paddr : qaddr; | |
3222 | ||
3223 | switch (chan->device->id) { | |
3224 | case PPC440SPE_DMA0_ID: | |
3225 | case PPC440SPE_DMA1_ID: | |
3226 | /* walk through the WXOR source list and set P/Q-destinations | |
3227 | * for each slot: | |
3228 | */ | |
3229 | if (!test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) { | |
3230 | /* This is WXOR-only chain; may have 1/2 zero descs */ | |
3231 | if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags)) | |
3232 | index++; | |
3233 | if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags)) | |
3234 | index++; | |
3235 | ||
3236 | iter = ppc440spe_get_group_entry(sw_desc, index); | |
3237 | if (addr) { | |
3238 | /* one destination */ | |
3239 | list_for_each_entry_from(iter, | |
3240 | &sw_desc->group_list, chain_node) | |
3241 | ppc440spe_desc_set_dest_addr(iter, chan, | |
3242 | DMA_CUED_XOR_BASE, addr, 0); | |
3243 | } else { | |
3244 | /* two destinations */ | |
3245 | list_for_each_entry_from(iter, | |
3246 | &sw_desc->group_list, chain_node) { | |
3247 | ppc440spe_desc_set_dest_addr(iter, chan, | |
3248 | DMA_CUED_XOR_BASE, paddr, 0); | |
3249 | ppc440spe_desc_set_dest_addr(iter, chan, | |
3250 | DMA_CUED_XOR_BASE, qaddr, 1); | |
3251 | } | |
3252 | } | |
3253 | ||
3254 | if (index) { | |
3255 | /* To clear destinations update the descriptor | |
3256 | * (1st,2nd, or both depending on flags) | |
3257 | */ | |
3258 | index = 0; | |
3259 | if (test_bit(PPC440SPE_ZERO_P, | |
3260 | &sw_desc->flags)) { | |
3261 | iter = ppc440spe_get_group_entry( | |
3262 | sw_desc, index++); | |
3263 | ppc440spe_adma_pq_zero_op(iter, chan, | |
3264 | paddr); | |
3265 | } | |
3266 | ||
3267 | if (test_bit(PPC440SPE_ZERO_Q, | |
3268 | &sw_desc->flags)) { | |
3269 | iter = ppc440spe_get_group_entry( | |
3270 | sw_desc, index++); | |
3271 | ppc440spe_adma_pq_zero_op(iter, chan, | |
3272 | qaddr); | |
3273 | } | |
3274 | ||
3275 | return; | |
3276 | } | |
3277 | } else { | |
3278 | /* This is RXOR-only or RXOR/WXOR mixed chain */ | |
3279 | ||
3280 | /* If we want to include destination into calculations, | |
3281 | * then make dest addresses cued with mult=1 (XOR). | |
3282 | */ | |
3283 | ppath = test_bit(PPC440SPE_ZERO_P, &sw_desc->flags) ? | |
3284 | DMA_CUED_XOR_HB : | |
3285 | DMA_CUED_XOR_BASE | | |
3286 | (1 << DMA_CUED_MULT1_OFF); | |
3287 | qpath = test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags) ? | |
3288 | DMA_CUED_XOR_HB : | |
3289 | DMA_CUED_XOR_BASE | | |
3290 | (1 << DMA_CUED_MULT1_OFF); | |
3291 | ||
3292 | /* Setup destination(s) in RXOR slot(s) */ | |
3293 | iter = ppc440spe_get_group_entry(sw_desc, index++); | |
3294 | ppc440spe_desc_set_dest_addr(iter, chan, | |
3295 | paddr ? ppath : qpath, | |
3296 | paddr ? paddr : qaddr, 0); | |
3297 | if (!addr) { | |
3298 | /* two destinations */ | |
3299 | iter = ppc440spe_get_group_entry(sw_desc, | |
3300 | index++); | |
3301 | ppc440spe_desc_set_dest_addr(iter, chan, | |
3302 | qpath, qaddr, 0); | |
3303 | } | |
3304 | ||
3305 | if (test_bit(PPC440SPE_DESC_WXOR, &sw_desc->flags)) { | |
3306 | /* Setup destination(s) in remaining WXOR | |
3307 | * slots | |
3308 | */ | |
3309 | iter = ppc440spe_get_group_entry(sw_desc, | |
3310 | index); | |
3311 | if (addr) { | |
3312 | /* one destination */ | |
3313 | list_for_each_entry_from(iter, | |
3314 | &sw_desc->group_list, | |
3315 | chain_node) | |
3316 | ppc440spe_desc_set_dest_addr( | |
3317 | iter, chan, | |
3318 | DMA_CUED_XOR_BASE, | |
3319 | addr, 0); | |
3320 | ||
3321 | } else { | |
3322 | /* two destinations */ | |
3323 | list_for_each_entry_from(iter, | |
3324 | &sw_desc->group_list, | |
3325 | chain_node) { | |
3326 | ppc440spe_desc_set_dest_addr( | |
3327 | iter, chan, | |
3328 | DMA_CUED_XOR_BASE, | |
3329 | paddr, 0); | |
3330 | ppc440spe_desc_set_dest_addr( | |
3331 | iter, chan, | |
3332 | DMA_CUED_XOR_BASE, | |
3333 | qaddr, 1); | |
3334 | } | |
3335 | } | |
3336 | } | |
3337 | ||
3338 | } | |
3339 | break; | |
3340 | ||
3341 | case PPC440SPE_XOR_ID: | |
3342 | /* DMA2 descriptors have only 1 destination, so there are | |
3343 | * two chains - one for each dest. | |
3344 | * If we want to include destination into calculations, | |
3345 | * then make dest addresses cued with mult=1 (XOR). | |
3346 | */ | |
3347 | ppath = test_bit(PPC440SPE_ZERO_P, &sw_desc->flags) ? | |
3348 | DMA_CUED_XOR_HB : | |
3349 | DMA_CUED_XOR_BASE | | |
3350 | (1 << DMA_CUED_MULT1_OFF); | |
3351 | ||
3352 | qpath = test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags) ? | |
3353 | DMA_CUED_XOR_HB : | |
3354 | DMA_CUED_XOR_BASE | | |
3355 | (1 << DMA_CUED_MULT1_OFF); | |
3356 | ||
3357 | iter = ppc440spe_get_group_entry(sw_desc, 0); | |
3358 | for (i = 0; i < sw_desc->descs_per_op; i++) { | |
3359 | ppc440spe_desc_set_dest_addr(iter, chan, | |
3360 | paddr ? ppath : qpath, | |
3361 | paddr ? paddr : qaddr, 0); | |
3362 | iter = list_entry(iter->chain_node.next, | |
3363 | struct ppc440spe_adma_desc_slot, | |
3364 | chain_node); | |
3365 | } | |
3366 | ||
3367 | if (!addr) { | |
3368 | /* Two destinations; setup Q here */ | |
3369 | iter = ppc440spe_get_group_entry(sw_desc, | |
3370 | sw_desc->descs_per_op); | |
3371 | for (i = 0; i < sw_desc->descs_per_op; i++) { | |
3372 | ppc440spe_desc_set_dest_addr(iter, | |
3373 | chan, qpath, qaddr, 0); | |
3374 | iter = list_entry(iter->chain_node.next, | |
3375 | struct ppc440spe_adma_desc_slot, | |
3376 | chain_node); | |
3377 | } | |
3378 | } | |
3379 | ||
3380 | break; | |
3381 | } | |
3382 | } | |
3383 | ||
3384 | /** | |
3385 | * ppc440spe_adma_pqzero_sum_set_dest - set destination address into descriptor | |
3386 | * for the PQ_ZERO_SUM operation | |
3387 | */ | |
3388 | static void ppc440spe_adma_pqzero_sum_set_dest( | |
3389 | struct ppc440spe_adma_desc_slot *sw_desc, | |
3390 | dma_addr_t paddr, dma_addr_t qaddr) | |
3391 | { | |
3392 | struct ppc440spe_adma_desc_slot *iter, *end; | |
3393 | struct ppc440spe_adma_chan *chan; | |
3394 | dma_addr_t addr = 0; | |
3395 | int idx; | |
3396 | ||
3397 | chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan); | |
3398 | ||
3399 | /* walk through the WXOR source list and set P/Q-destinations | |
3400 | * for each slot | |
3401 | */ | |
3402 | idx = (paddr && qaddr) ? 2 : 1; | |
3403 | /* set end */ | |
3404 | list_for_each_entry_reverse(end, &sw_desc->group_list, | |
3405 | chain_node) { | |
3406 | if (!(--idx)) | |
3407 | break; | |
3408 | } | |
3409 | /* set start */ | |
3410 | idx = (paddr && qaddr) ? 2 : 1; | |
3411 | iter = ppc440spe_get_group_entry(sw_desc, idx); | |
3412 | ||
3413 | if (paddr && qaddr) { | |
3414 | /* two destinations */ | |
3415 | list_for_each_entry_from(iter, &sw_desc->group_list, | |
3416 | chain_node) { | |
3417 | if (unlikely(iter == end)) | |
3418 | break; | |
3419 | ppc440spe_desc_set_dest_addr(iter, chan, | |
3420 | DMA_CUED_XOR_BASE, paddr, 0); | |
3421 | ppc440spe_desc_set_dest_addr(iter, chan, | |
3422 | DMA_CUED_XOR_BASE, qaddr, 1); | |
3423 | } | |
3424 | } else { | |
3425 | /* one destination */ | |
3426 | addr = paddr ? paddr : qaddr; | |
3427 | list_for_each_entry_from(iter, &sw_desc->group_list, | |
3428 | chain_node) { | |
3429 | if (unlikely(iter == end)) | |
3430 | break; | |
3431 | ppc440spe_desc_set_dest_addr(iter, chan, | |
3432 | DMA_CUED_XOR_BASE, addr, 0); | |
3433 | } | |
3434 | } | |
3435 | ||
3436 | /* The remaining descriptors are DATACHECK. They need no | |
3437 | * destination: the destination addresses are used there | |
3438 | * as sources for the check operation. So set addr as a source. | |
3439 | */ | |
3440 | ppc440spe_desc_set_src_addr(end, chan, 0, 0, addr ? addr : paddr); | |
3441 | ||
3442 | if (!addr) { | |
3443 | end = list_entry(end->chain_node.next, | |
3444 | struct ppc440spe_adma_desc_slot, chain_node); | |
3445 | ppc440spe_desc_set_src_addr(end, chan, 0, 0, qaddr); | |
3446 | } | |
3447 | } | |
3448 | ||
3449 | /** | |
3450 | * ppc440spe_desc_set_xor_src_cnt - set source count into descriptor | |
3451 | */ | |
3452 | static inline void ppc440spe_desc_set_xor_src_cnt( | |
3453 | struct ppc440spe_adma_desc_slot *desc, | |
3454 | int src_cnt) | |
3455 | { | |
3456 | struct xor_cb *hw_desc = desc->hw_desc; | |
3457 | ||
3458 | hw_desc->cbc &= ~XOR_CDCR_OAC_MSK; | |
3459 | hw_desc->cbc |= src_cnt; | |
3460 | } | |
3461 | ||
3462 | /** | |
3463 | * ppc440spe_adma_pq_set_src - set source address into descriptor | |
3464 | */ | |
3465 | static void ppc440spe_adma_pq_set_src(struct ppc440spe_adma_desc_slot *sw_desc, | |
3466 | dma_addr_t addr, int index) | |
3467 | { | |
3468 | struct ppc440spe_adma_chan *chan; | |
3469 | dma_addr_t haddr = 0; | |
3470 | struct ppc440spe_adma_desc_slot *iter = NULL; | |
3471 | ||
3472 | chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan); | |
3473 | ||
3474 | switch (chan->device->id) { | |
3475 | case PPC440SPE_DMA0_ID: | |
3476 | case PPC440SPE_DMA1_ID: | |
3477 | /* DMA0,1 may do: WXOR, RXOR, RXOR+WXORs chain | |
3478 | */ | |
3479 | if (test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) { | |
3480 | /* RXOR-only or RXOR/WXOR operation */ | |
3481 | int iskip = test_bit(PPC440SPE_DESC_RXOR12, | |
3482 | &sw_desc->flags) ? 2 : 3; | |
3483 | ||
3484 | if (index == 0) { | |
3485 | /* 1st slot (RXOR) */ | |
3486 | /* setup sources region (R1-2-3, R1-2-4, | |
3487 | * or R1-2-5) | |
3488 | */ | |
3489 | if (test_bit(PPC440SPE_DESC_RXOR12, | |
3490 | &sw_desc->flags)) | |
3491 | haddr = DMA_RXOR12 << | |
3492 | DMA_CUED_REGION_OFF; | |
3493 | else if (test_bit(PPC440SPE_DESC_RXOR123, | |
3494 | &sw_desc->flags)) | |
3495 | haddr = DMA_RXOR123 << | |
3496 | DMA_CUED_REGION_OFF; | |
3497 | else if (test_bit(PPC440SPE_DESC_RXOR124, | |
3498 | &sw_desc->flags)) | |
3499 | haddr = DMA_RXOR124 << | |
3500 | DMA_CUED_REGION_OFF; | |
3501 | else if (test_bit(PPC440SPE_DESC_RXOR125, | |
3502 | &sw_desc->flags)) | |
3503 | haddr = DMA_RXOR125 << | |
3504 | DMA_CUED_REGION_OFF; | |
3505 | else | |
3506 | BUG(); | |
3507 | haddr |= DMA_CUED_XOR_BASE; | |
3508 | iter = ppc440spe_get_group_entry(sw_desc, 0); | |
3509 | } else if (index < iskip) { | |
3510 | /* 1st slot (RXOR): | |
3511 | * the source address shall be set only once | |
3512 | * for the first <iskip> sources | |
3513 | */ | |
3514 | iter = NULL; | |
3515 | } else { | |
3516 | /* 2nd/3rd and next slots (WXOR); | |
3517 | * skip first slot with RXOR | |
3518 | */ | |
3519 | haddr = DMA_CUED_XOR_HB; | |
3520 | iter = ppc440spe_get_group_entry(sw_desc, | |
3521 | index - iskip + sw_desc->dst_cnt); | |
3522 | } | |
3523 | } else { | |
3524 | int znum = 0; | |
3525 | ||
3526 | /* WXOR-only operation; skip first slots with | |
3527 | * zeroing destinations | |
3528 | */ | |
3529 | if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags)) | |
3530 | znum++; | |
3531 | if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags)) | |
3532 | znum++; | |
3533 | ||
3534 | haddr = DMA_CUED_XOR_HB; | |
3535 | iter = ppc440spe_get_group_entry(sw_desc, | |
3536 | index + znum); | |
3537 | } | |
3538 | ||
3539 | if (likely(iter)) { | |
3540 | ppc440spe_desc_set_src_addr(iter, chan, 0, haddr, addr); | |
3541 | ||
3542 | if (!index && | |
3543 | test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags) && | |
3544 | sw_desc->dst_cnt == 2) { | |
3545 | /* if we have two destinations for RXOR, then | |
3546 | * set up the source in the second descriptor too | |
3547 | */ | |
3548 | iter = ppc440spe_get_group_entry(sw_desc, 1); | |
3549 | ppc440spe_desc_set_src_addr(iter, chan, 0, | |
3550 | haddr, addr); | |
3551 | } | |
3552 | } | |
3553 | break; | |
3554 | ||
3555 | case PPC440SPE_XOR_ID: | |
3556 | /* DMA2 may do Biskup */ | |
3557 | iter = sw_desc->group_head; | |
3558 | if (iter->dst_cnt == 2) { | |
3559 | /* both P & Q calculations required; set P src here */ | |
3560 | ppc440spe_adma_dma2rxor_set_src(iter, index, addr); | |
3561 | ||
3562 | /* this is for Q */ | |
3563 | iter = ppc440spe_get_group_entry(sw_desc, | |
3564 | sw_desc->descs_per_op); | |
3565 | } | |
3566 | ppc440spe_adma_dma2rxor_set_src(iter, index, addr); | |
3567 | break; | |
3568 | } | |
3569 | } | |
3570 | ||
3571 | /** | |
3572 | * ppc440spe_adma_memcpy_xor_set_src - set source address into descriptor | |
3573 | */ | |
3574 | static void ppc440spe_adma_memcpy_xor_set_src( | |
3575 | struct ppc440spe_adma_desc_slot *sw_desc, | |
3576 | dma_addr_t addr, int index) | |
3577 | { | |
3578 | struct ppc440spe_adma_chan *chan; | |
3579 | ||
3580 | chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan); | |
3581 | sw_desc = sw_desc->group_head; | |
3582 | ||
3583 | if (likely(sw_desc)) | |
3584 | ppc440spe_desc_set_src_addr(sw_desc, chan, index, 0, addr); | |
3585 | } | |
3586 | ||
3587 | /** | |
3588 | * ppc440spe_adma_dma2rxor_inc_addr - advance the RXOR cursor to the next | |
3588 | * address slot | |
3589 | */ | |
3590 | static void ppc440spe_adma_dma2rxor_inc_addr( | |
3591 | struct ppc440spe_adma_desc_slot *desc, | |
3592 | struct ppc440spe_rxor *cursor, int index, int src_cnt) | |
3593 | { | |
3594 | cursor->addr_count++; | |
3595 | if (index == src_cnt - 1) { | |
3596 | ppc440spe_desc_set_xor_src_cnt(desc, cursor->addr_count); | |
3597 | } else if (cursor->addr_count == XOR_MAX_OPS) { | |
3598 | ppc440spe_desc_set_xor_src_cnt(desc, cursor->addr_count); | |
3599 | cursor->addr_count = 0; | |
3600 | cursor->desc_count++; | |
3601 | } | |
3602 | } | |
3603 | ||
3604 | /** | |
3605 | * ppc440spe_adma_dma2rxor_prep_src - setup RXOR types in DMA2 CDB | |
3606 | */ | |
3607 | static int ppc440spe_adma_dma2rxor_prep_src( | |
3608 | struct ppc440spe_adma_desc_slot *hdesc, | |
3609 | struct ppc440spe_rxor *cursor, int index, | |
3610 | int src_cnt, u32 addr) | |
3611 | { | |
3612 | int rval = 0; | |
3613 | u32 sign; | |
3614 | struct ppc440spe_adma_desc_slot *desc = hdesc; | |
3615 | int i; | |
3616 | ||
3617 | for (i = 0; i < cursor->desc_count; i++) { | |
3618 | desc = list_entry(hdesc->chain_node.next, | |
3619 | struct ppc440spe_adma_desc_slot, | |
3620 | chain_node); | |
3621 | } | |
3622 | ||
3623 | switch (cursor->state) { | |
3624 | case 0: | |
3625 | if (addr == cursor->addrl + cursor->len) { | |
3626 | /* direct RXOR */ | |
3627 | cursor->state = 1; | |
3628 | cursor->xor_count++; | |
3629 | if (index == src_cnt-1) { | |
3630 | ppc440spe_rxor_set_region(desc, | |
3631 | cursor->addr_count, | |
3632 | DMA_RXOR12 << DMA_CUED_REGION_OFF); | |
3633 | ppc440spe_adma_dma2rxor_inc_addr( | |
3634 | desc, cursor, index, src_cnt); | |
3635 | } | |
3636 | } else if (cursor->addrl == addr + cursor->len) { | |
3637 | /* reverse RXOR */ | |
3638 | cursor->state = 1; | |
3639 | cursor->xor_count++; | |
3640 | set_bit(cursor->addr_count, &desc->reverse_flags[0]); | |
3641 | if (index == src_cnt-1) { | |
3642 | ppc440spe_rxor_set_region(desc, | |
3643 | cursor->addr_count, | |
3644 | DMA_RXOR12 << DMA_CUED_REGION_OFF); | |
3645 | ppc440spe_adma_dma2rxor_inc_addr( | |
3646 | desc, cursor, index, src_cnt); | |
3647 | } | |
3648 | } else { | |
3649 | printk(KERN_ERR "Cannot build " | |
3650 | "DMA2 RXOR command block.\n"); | |
3651 | BUG(); | |
3652 | } | |
3653 | break; | |
3654 | case 1: | |
3655 | sign = test_bit(cursor->addr_count, | |
3656 | desc->reverse_flags) | |
3657 | ? -1 : 1; | |
3658 | if (index == src_cnt-2 || (sign == -1 | |
3659 | && addr != cursor->addrl - 2*cursor->len)) { | |
3660 | cursor->state = 0; | |
3661 | cursor->xor_count = 1; | |
3662 | cursor->addrl = addr; | |
3663 | ppc440spe_rxor_set_region(desc, | |
3664 | cursor->addr_count, | |
3665 | DMA_RXOR12 << DMA_CUED_REGION_OFF); | |
3666 | ppc440spe_adma_dma2rxor_inc_addr( | |
3667 | desc, cursor, index, src_cnt); | |
3668 | } else if (addr == cursor->addrl + 2*sign*cursor->len) { | |
3669 | cursor->state = 2; | |
3670 | cursor->xor_count = 0; | |
3671 | ppc440spe_rxor_set_region(desc, | |
3672 | cursor->addr_count, | |
3673 | DMA_RXOR123 << DMA_CUED_REGION_OFF); | |
3674 | if (index == src_cnt-1) { | |
3675 | ppc440spe_adma_dma2rxor_inc_addr( | |
3676 | desc, cursor, index, src_cnt); | |
3677 | } | |
3678 | } else if (addr == cursor->addrl + 3*cursor->len) { | |
3679 | cursor->state = 2; | |
3680 | cursor->xor_count = 0; | |
3681 | ppc440spe_rxor_set_region(desc, | |
3682 | cursor->addr_count, | |
3683 | DMA_RXOR124 << DMA_CUED_REGION_OFF); | |
3684 | if (index == src_cnt-1) { | |
3685 | ppc440spe_adma_dma2rxor_inc_addr( | |
3686 | desc, cursor, index, src_cnt); | |
3687 | } | |
3688 | } else if (addr == cursor->addrl + 4*cursor->len) { | |
3689 | cursor->state = 2; | |
3690 | cursor->xor_count = 0; | |
3691 | ppc440spe_rxor_set_region(desc, | |
3692 | cursor->addr_count, | |
3693 | DMA_RXOR125 << DMA_CUED_REGION_OFF); | |
3694 | if (index == src_cnt-1) { | |
3695 | ppc440spe_adma_dma2rxor_inc_addr( | |
3696 | desc, cursor, index, src_cnt); | |
3697 | } | |
3698 | } else { | |
3699 | cursor->state = 0; | |
3700 | cursor->xor_count = 1; | |
3701 | cursor->addrl = addr; | |
3702 | ppc440spe_rxor_set_region(desc, | |
3703 | cursor->addr_count, | |
3704 | DMA_RXOR12 << DMA_CUED_REGION_OFF); | |
3705 | ppc440spe_adma_dma2rxor_inc_addr( | |
3706 | desc, cursor, index, src_cnt); | |
3707 | } | |
3708 | break; | |
3709 | case 2: | |
3710 | cursor->state = 0; | |
3711 | cursor->addrl = addr; | |
3712 | cursor->xor_count++; | |
3713 | if (index) { | |
3714 | ppc440spe_adma_dma2rxor_inc_addr( | |
3715 | desc, cursor, index, src_cnt); | |
3716 | } | |
3717 | break; | |
3718 | } | |
3719 | ||
3720 | return rval; | |
3721 | } | |
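The switch above is a small state machine that recognizes which RXOR region a run of source addresses forms. A compact sketch of the case-1 transitions, assuming the first two sources base and base + len already form a direct pair (illustrative helper, not part of the driver):

	static u32 example_classify_third_src(u32 base, u32 len, u32 third)
	{
		if (third == base + 2 * len)
			return DMA_RXOR123;	/* region R1-2-3 */
		if (third == base + 3 * len)
			return DMA_RXOR124;	/* region R1-2-4 */
		if (third == base + 4 * len)
			return DMA_RXOR125;	/* region R1-2-5 */
		return DMA_RXOR12;		/* close the pair, start a new one */
	}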
3722 | ||
3723 | /** | |
3724 | * ppc440spe_adma_dma2rxor_set_src - set RXOR source address; it's assumed that | |
3725 | * ppc440spe_adma_dma2rxor_prep_src() has already been called prior to this | |
3726 | */ | |
3727 | static void ppc440spe_adma_dma2rxor_set_src( | |
3728 | struct ppc440spe_adma_desc_slot *desc, | |
3729 | int index, dma_addr_t addr) | |
3730 | { | |
3731 | struct xor_cb *xcb = desc->hw_desc; | |
3732 | int k = 0, op = 0, lop = 0; | |
3733 | ||
3734 | /* get the RXOR operand which corresponds to index addr */ | |
3735 | while (op <= index) { | |
3736 | lop = op; | |
3737 | if (k == XOR_MAX_OPS) { | |
3738 | k = 0; | |
3739 | desc = list_entry(desc->chain_node.next, | |
3740 | struct ppc440spe_adma_desc_slot, chain_node); | |
3741 | xcb = desc->hw_desc; | |
3742 | ||
3743 | } | |
3744 | if ((xcb->ops[k++].h & (DMA_RXOR12 << DMA_CUED_REGION_OFF)) == | |
3745 | (DMA_RXOR12 << DMA_CUED_REGION_OFF)) | |
3746 | op += 2; | |
3747 | else | |
3748 | op += 3; | |
3749 | } | |
3750 | ||
3751 | BUG_ON(k < 1); | |
3752 | ||
3753 | if (test_bit(k-1, desc->reverse_flags)) { | |
3754 | /* reverse operand order; put last op in RXOR group */ | |
3755 | if (index == op - 1) | |
3756 | ppc440spe_rxor_set_src(desc, k - 1, addr); | |
3757 | } else { | |
3758 | /* direct operand order; put first op in RXOR group */ | |
3759 | if (index == lop) | |
3760 | ppc440spe_rxor_set_src(desc, k - 1, addr); | |
3761 | } | |
3762 | } | |
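This routine and ppc440spe_adma_dma2rxor_set_mult() below share the same operand walk: an RXOR12 operand consumes two flat source indices, any other region three. The walk, rendered standalone with the per-operand region codes abstracted into an array (hypothetical helper):

	static int example_rxor_slot_for_index(const u32 *regions, int index)
	{
		int k = 0, op = 0;

		while (op <= index)
			op += (regions[k++] == DMA_RXOR12) ? 2 : 3;

		return k - 1;	/* operand slot covering this source index */
	}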
3763 | ||
3764 | /** | |
3765 | * ppc440spe_adma_dma2rxor_set_mult - set RXOR multipliers; it's assumed that | |
3766 | * ppc440spe_adma_dma2rxor_prep_src() has already been called prior to this | |
3767 | */ | |
3768 | static void ppc440spe_adma_dma2rxor_set_mult( | |
3769 | struct ppc440spe_adma_desc_slot *desc, | |
3770 | int index, u8 mult) | |
3771 | { | |
3772 | struct xor_cb *xcb = desc->hw_desc; | |
3773 | int k = 0, op = 0, lop = 0; | |
3774 | ||
3775 | /* get the RXOR operand which corresponds to index mult */ | |
3776 | while (op <= index) { | |
3777 | lop = op; | |
3778 | if (k == XOR_MAX_OPS) { | |
3779 | k = 0; | |
3780 | desc = list_entry(desc->chain_node.next, | |
3781 | struct ppc440spe_adma_desc_slot, | |
3782 | chain_node); | |
3783 | xcb = desc->hw_desc; | |
3784 | ||
3785 | } | |
3786 | if ((xcb->ops[k++].h & (DMA_RXOR12 << DMA_CUED_REGION_OFF)) == | |
3787 | (DMA_RXOR12 << DMA_CUED_REGION_OFF)) | |
3788 | op += 2; | |
3789 | else | |
3790 | op += 3; | |
3791 | } | |
3792 | ||
3793 | BUG_ON(k < 1); | |
3794 | if (test_bit(k-1, desc->reverse_flags)) { | |
3795 | /* reverse order */ | |
3796 | ppc440spe_rxor_set_mult(desc, k - 1, op - index - 1, mult); | |
3797 | } else { | |
3798 | /* direct order */ | |
3799 | ppc440spe_rxor_set_mult(desc, k - 1, index - lop, mult); | |
3800 | } | |
3801 | } | |
3802 | ||
3803 | /** | |
3804 | * ppc440spe_init_rxor_cursor - initialize the RXOR cursor state | |
3805 | */ | |
3806 | static void ppc440spe_init_rxor_cursor(struct ppc440spe_rxor *cursor) | |
3807 | { | |
3808 | memset(cursor, 0, sizeof(struct ppc440spe_rxor)); | |
3809 | cursor->state = 2; | |
3810 | } | |
3811 | ||
3812 | /** | |
3813 | * ppc440spe_adma_pq_set_src_mult - set multiplication coefficient into | |
3814 | * descriptor for the PQXOR operation | |
3815 | */ | |
3816 | static void ppc440spe_adma_pq_set_src_mult( | |
3817 | struct ppc440spe_adma_desc_slot *sw_desc, | |
3818 | unsigned char mult, int index, int dst_pos) | |
3819 | { | |
3820 | struct ppc440spe_adma_chan *chan; | |
3821 | u32 mult_idx, mult_dst; | |
3822 | struct ppc440spe_adma_desc_slot *iter = NULL, *iter1 = NULL; | |
3823 | ||
3824 | chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan); | |
3825 | ||
3826 | switch (chan->device->id) { | |
3827 | case PPC440SPE_DMA0_ID: | |
3828 | case PPC440SPE_DMA1_ID: | |
3829 | if (test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) { | |
3830 | int region = test_bit(PPC440SPE_DESC_RXOR12, | |
3831 | &sw_desc->flags) ? 2 : 3; | |
3832 | ||
3833 | if (index < region) { | |
3834 | /* RXOR multipliers */ | |
3835 | iter = ppc440spe_get_group_entry(sw_desc, | |
3836 | sw_desc->dst_cnt - 1); | |
3837 | if (sw_desc->dst_cnt == 2) | |
3838 | iter1 = ppc440spe_get_group_entry( | |
3839 | sw_desc, 0); | |
3840 | ||
3841 | mult_idx = DMA_CUED_MULT1_OFF + (index << 3); | |
3842 | mult_dst = DMA_CDB_SG_SRC; | |
3843 | } else { | |
3844 | /* WXOR multiplier */ | |
3845 | iter = ppc440spe_get_group_entry(sw_desc, | |
3846 | index - region + | |
3847 | sw_desc->dst_cnt); | |
3848 | mult_idx = DMA_CUED_MULT1_OFF; | |
3849 | mult_dst = dst_pos ? DMA_CDB_SG_DST2 : | |
3850 | DMA_CDB_SG_DST1; | |
3851 | } | |
3852 | } else { | |
3853 | int znum = 0; | |
3854 | ||
3855 | /* WXOR-only; | |
3856 | * skip the first slots with destinations (if ZERO_DST | |
3857 | * is in place) | |
3858 | */ | |
3859 | if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags)) | |
3860 | znum++; | |
3861 | if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags)) | |
3862 | znum++; | |
3863 | ||
3864 | iter = ppc440spe_get_group_entry(sw_desc, index + znum); | |
3865 | mult_idx = DMA_CUED_MULT1_OFF; | |
3866 | mult_dst = dst_pos ? DMA_CDB_SG_DST2 : DMA_CDB_SG_DST1; | |
3867 | } | |
3868 | ||
3869 | if (likely(iter)) { | |
3870 | ppc440spe_desc_set_src_mult(iter, chan, | |
3871 | mult_idx, mult_dst, mult); | |
3872 | ||
3873 | if (unlikely(iter1)) { | |
3874 | /* if we have two destinations for RXOR, then | |
3875 | * we've just set Q mult. Set-up P now. | |
3876 | */ | |
3877 | ppc440spe_desc_set_src_mult(iter1, chan, | |
3878 | mult_idx, mult_dst, 1); | |
3879 | } | |
3880 | ||
3881 | } | |
3882 | break; | |
3883 | ||
3884 | case PPC440SPE_XOR_ID: | |
3885 | iter = sw_desc->group_head; | |
3886 | if (sw_desc->dst_cnt == 2) { | |
3887 | /* both P & Q calculations required; set P mult here */ | |
3888 | ppc440spe_adma_dma2rxor_set_mult(iter, index, 1); | |
3889 | ||
3890 | /* and then set Q mult */ | |
3891 | iter = ppc440spe_get_group_entry(sw_desc, | |
3892 | sw_desc->descs_per_op); | |
3893 | } | |
3894 | ppc440spe_adma_dma2rxor_set_mult(iter, index, mult); | |
3895 | break; | |
3896 | } | |
3897 | } | |
3898 | ||
3899 | /** | |
3900 | * ppc440spe_adma_free_chan_resources - free the resources allocated | |
3901 | */ | |
3902 | static void ppc440spe_adma_free_chan_resources(struct dma_chan *chan) | |
3903 | { | |
3904 | struct ppc440spe_adma_chan *ppc440spe_chan; | |
3905 | struct ppc440spe_adma_desc_slot *iter, *_iter; | |
3906 | int in_use_descs = 0; | |
3907 | ||
3908 | ppc440spe_chan = to_ppc440spe_adma_chan(chan); | |
3909 | ppc440spe_adma_slot_cleanup(ppc440spe_chan); | |
3910 | ||
3911 | spin_lock_bh(&ppc440spe_chan->lock); | |
3912 | list_for_each_entry_safe(iter, _iter, &ppc440spe_chan->chain, | |
3913 | chain_node) { | |
3914 | in_use_descs++; | |
3915 | list_del(&iter->chain_node); | |
3916 | } | |
3917 | list_for_each_entry_safe_reverse(iter, _iter, | |
3918 | &ppc440spe_chan->all_slots, slot_node) { | |
3919 | list_del(&iter->slot_node); | |
3920 | kfree(iter); | |
3921 | ppc440spe_chan->slots_allocated--; | |
3922 | } | |
3923 | ppc440spe_chan->last_used = NULL; | |
3924 | ||
3925 | dev_dbg(ppc440spe_chan->device->common.dev, | |
3926 | "ppc440spe adma%d %s slots_allocated %d\n", | |
3927 | ppc440spe_chan->device->id, | |
3928 | __func__, ppc440spe_chan->slots_allocated); | |
3929 | spin_unlock_bh(&ppc440spe_chan->lock); | |
3930 | ||
3931 | /* one is OK since we left it there on purpose */ | |
3932 | if (in_use_descs > 1) | |
3933 | printk(KERN_ERR "SPE: Freeing %d in use descriptors!\n", | |
3934 | in_use_descs - 1); | |
3935 | } | |
3936 | ||
3937 | /** | |
3938 | * ppc440spe_adma_tx_status - poll the status of an ADMA transaction | |
3939 | * @chan: ADMA channel handle | |
3940 | * @cookie: ADMA transaction identifier | |
3941 | * @txstate: a holder for the current state of the channel | |
3942 | */ | |
3943 | static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan, |
3944 | dma_cookie_t cookie, struct dma_tx_state *txstate) | |
3945 | { |
3946 | struct ppc440spe_adma_chan *ppc440spe_chan; | |
3947 | dma_cookie_t last_used; | |
3948 | dma_cookie_t last_complete; | |
3949 | enum dma_status ret; | |
3950 | ||
3951 | ppc440spe_chan = to_ppc440spe_adma_chan(chan); | |
3952 | last_used = chan->cookie; | |
3953 | last_complete = ppc440spe_chan->completed_cookie; | |
3954 | ||
3955 | dma_set_tx_state(txstate, last_complete, last_used, 0); | |
3956 | |
3957 | ret = dma_async_is_complete(cookie, last_complete, last_used); | |
3958 | if (ret == DMA_SUCCESS) | |
3959 | return ret; | |
3960 | ||
3961 | ppc440spe_adma_slot_cleanup(ppc440spe_chan); | |
3962 | ||
3963 | last_used = chan->cookie; | |
3964 | last_complete = ppc440spe_chan->completed_cookie; | |
3965 | ||
3966 | dma_set_tx_state(txstate, last_complete, last_used, 0); | |
3967 | |
3968 | return dma_async_is_complete(cookie, last_complete, last_used); | |
3969 | } | |
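Clients normally reach this routine through the generic dmaengine helpers rather than calling it directly. A minimal polling sketch under that assumption (example_poll_cookie is hypothetical; the busy-wait is for illustration only):

	static int example_poll_cookie(struct dma_chan *chan, dma_cookie_t cookie)
	{
		enum dma_status status;

		do {
			/* dispatches to ppc440spe_adma_tx_status() via
			 * the device_tx_status hook
			 */
			status = dma_async_is_tx_complete(chan, cookie,
							  NULL, NULL);
		} while (status == DMA_IN_PROGRESS);

		return status == DMA_SUCCESS ? 0 : -EIO;
	}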
3970 | ||
3971 | /** | |
3972 | * ppc440spe_adma_eot_handler - end of transfer interrupt handler | |
3973 | */ | |
3974 | static irqreturn_t ppc440spe_adma_eot_handler(int irq, void *data) | |
3975 | { | |
3976 | struct ppc440spe_adma_chan *chan = data; | |
3977 | ||
3978 | dev_dbg(chan->device->common.dev, | |
3979 | "ppc440spe adma%d: %s\n", chan->device->id, __func__); | |
3980 | ||
3981 | tasklet_schedule(&chan->irq_tasklet); | |
3982 | ppc440spe_adma_device_clear_eot_status(chan); | |
3983 | ||
3984 | return IRQ_HANDLED; | |
3985 | } | |
3986 | ||
3987 | /** | |
3988 | * ppc440spe_adma_err_handler - DMA error interrupt handler; | |
3989 | * do the same things as an EOT handler | |
3990 | */ | |
3991 | static irqreturn_t ppc440spe_adma_err_handler(int irq, void *data) | |
3992 | { | |
3993 | struct ppc440spe_adma_chan *chan = data; | |
3994 | ||
3995 | dev_dbg(chan->device->common.dev, | |
3996 | "ppc440spe adma%d: %s\n", chan->device->id, __func__); | |
3997 | ||
3998 | tasklet_schedule(&chan->irq_tasklet); | |
3999 | ppc440spe_adma_device_clear_eot_status(chan); | |
4000 | ||
4001 | return IRQ_HANDLED; | |
4002 | } | |
4003 | ||
4004 | /** | |
4005 | * ppc440spe_test_callback - called when test operation has been done | |
4006 | */ | |
4007 | static void ppc440spe_test_callback(void *unused) | |
4008 | { | |
4009 | complete(&ppc440spe_r6_test_comp); | |
4010 | } | |
4011 | ||
4012 | /** | |
4013 | * ppc440spe_adma_issue_pending - flush all pending descriptors to h/w | |
4014 | */ | |
4015 | static void ppc440spe_adma_issue_pending(struct dma_chan *chan) | |
4016 | { | |
4017 | struct ppc440spe_adma_chan *ppc440spe_chan; | |
4018 | ||
4019 | ppc440spe_chan = to_ppc440spe_adma_chan(chan); | |
4020 | dev_dbg(ppc440spe_chan->device->common.dev, | |
4021 | "ppc440spe adma%d: %s %d \n", ppc440spe_chan->device->id, | |
4022 | __func__, ppc440spe_chan->pending); | |
4023 | ||
4024 | if (ppc440spe_chan->pending) { | |
4025 | ppc440spe_chan->pending = 0; | |
4026 | ppc440spe_chan_append(ppc440spe_chan); | |
4027 | } | |
4028 | } | |
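Descriptors only reach the hardware after a client runs the usual prep -> submit -> issue_pending sequence. A hedged sketch of that flow for a memcpy on this device (example_memcpy is hypothetical; error handling trimmed):

	static int example_memcpy(struct dma_chan *chan, dma_addr_t dst,
				  dma_addr_t src, size_t len)
	{
		struct dma_async_tx_descriptor *tx;
		dma_cookie_t cookie;

		tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
							  DMA_PREP_INTERRUPT);
		if (!tx)
			return -ENOMEM;

		cookie = tx->tx_submit(tx);	/* queue on the software chain */
		dma_async_issue_pending(chan);	/* invokes the routine above */
		return dma_submit_error(cookie) ? -EIO : 0;
	}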
4029 | ||
4030 | /** | |
4031 | * ppc440spe_chan_start_null_xor - initiate the first XOR operation (DMA engines | |
4032 | * use FIFOs, as opposed to the chains used by the XOR engine, so this | |
4033 | * is an XOR-specific operation) | |
4034 | */ | |
4035 | static void ppc440spe_chan_start_null_xor(struct ppc440spe_adma_chan *chan) | |
4036 | { | |
4037 | struct ppc440spe_adma_desc_slot *sw_desc, *group_start; | |
4038 | dma_cookie_t cookie; | |
4039 | int slot_cnt, slots_per_op; | |
4040 | ||
4041 | dev_dbg(chan->device->common.dev, | |
4042 | "ppc440spe adma%d: %s\n", chan->device->id, __func__); | |
4043 | ||
4044 | spin_lock_bh(&chan->lock); | |
4045 | slot_cnt = ppc440spe_chan_xor_slot_count(0, 2, &slots_per_op); | |
4046 | sw_desc = ppc440spe_adma_alloc_slots(chan, slot_cnt, slots_per_op); | |
4047 | if (sw_desc) { | |
4048 | group_start = sw_desc->group_head; | |
4049 | list_splice_init(&sw_desc->group_list, &chan->chain); | |
4050 | async_tx_ack(&sw_desc->async_tx); | |
4051 | ppc440spe_desc_init_null_xor(group_start); | |
4052 | ||
4053 | cookie = chan->common.cookie; | |
4054 | cookie++; | |
4055 | if (cookie <= 1) | |
4056 | cookie = 2; | |
4057 | ||
4058 | /* initialize the completed cookie to be less than | |
4059 | * the most recently used cookie | |
4060 | */ | |
4061 | chan->completed_cookie = cookie - 1; | |
4062 | chan->common.cookie = sw_desc->async_tx.cookie = cookie; | |
4063 | ||
4064 | /* channel should not be busy */ | |
4065 | BUG_ON(ppc440spe_chan_is_busy(chan)); | |
4066 | ||
4067 | /* set the descriptor address */ | |
4068 | ppc440spe_chan_set_first_xor_descriptor(chan, sw_desc); | |
4069 | ||
4070 | /* run the descriptor */ | |
4071 | ppc440spe_chan_run(chan); | |
4072 | } else | |
4073 | printk(KERN_ERR "ppc440spe adma%d" | |
4074 | " failed to allocate null descriptor\n", | |
4075 | chan->device->id); | |
4076 | spin_unlock_bh(&chan->lock); | |
4077 | } | |
4078 | ||
4079 | /** | |
4080 | * ppc440spe_test_raid6 - test whether RAID-6 capabilities were enabled successfully. | |
4081 | * For this we just perform one WXOR operation with the same source | |
4082 | * and destination addresses, the GF-multiplier is 1; so if RAID-6 | |
4083 | * capabilities are enabled then we'll get src/dst filled with zero. | |
4084 | */ | |
4085 | static int ppc440spe_test_raid6(struct ppc440spe_adma_chan *chan) | |
4086 | { | |
4087 | struct ppc440spe_adma_desc_slot *sw_desc, *iter; | |
4088 | struct page *pg; | |
4089 | char *a; | |
4090 | dma_addr_t dma_addr, addrs[2]; | |
4091 | unsigned long op = 0; | |
4092 | int rval = 0; | |
4093 | ||
4094 | set_bit(PPC440SPE_DESC_WXOR, &op); | |
4095 | ||
4096 | pg = alloc_page(GFP_KERNEL); | |
4097 | if (!pg) | |
4098 | return -ENOMEM; | |
4099 | ||
4100 | spin_lock_bh(&chan->lock); | |
4101 | sw_desc = ppc440spe_adma_alloc_slots(chan, 1, 1); | |
4102 | if (sw_desc) { | |
4103 | /* 1 src, 1 dst, int_ena, WXOR */ | |
4104 | ppc440spe_desc_init_dma01pq(sw_desc, 1, 1, 1, op); | |
4105 | list_for_each_entry(iter, &sw_desc->group_list, chain_node) { | |
4106 | ppc440spe_desc_set_byte_count(iter, chan, PAGE_SIZE); | |
4107 | iter->unmap_len = PAGE_SIZE; | |
4108 | } | |
4109 | } else { | |
4110 | rval = -EFAULT; | |
4111 | spin_unlock_bh(&chan->lock); | |
4112 | goto exit; | |
4113 | } | |
4114 | spin_unlock_bh(&chan->lock); | |
4115 | ||
4116 | /* Fill the test page with ones */ | |
4117 | memset(page_address(pg), 0xFF, PAGE_SIZE); | |
4118 | dma_addr = dma_map_page(chan->device->dev, pg, 0, | |
4119 | PAGE_SIZE, DMA_BIDIRECTIONAL); | |
4120 | ||
4121 | /* Setup addresses */ | |
4122 | ppc440spe_adma_pq_set_src(sw_desc, dma_addr, 0); | |
4123 | ppc440spe_adma_pq_set_src_mult(sw_desc, 1, 0, 0); | |
4124 | addrs[0] = dma_addr; | |
4125 | addrs[1] = 0; | |
4126 | ppc440spe_adma_pq_set_dest(sw_desc, addrs, DMA_PREP_PQ_DISABLE_Q); | |
4127 | ||
4128 | async_tx_ack(&sw_desc->async_tx); | |
4129 | sw_desc->async_tx.callback = ppc440spe_test_callback; | |
4130 | sw_desc->async_tx.callback_param = NULL; | |
4131 | ||
4132 | init_completion(&ppc440spe_r6_test_comp); | |
4133 | ||
4134 | ppc440spe_adma_tx_submit(&sw_desc->async_tx); | |
4135 | ppc440spe_adma_issue_pending(&chan->common); | |
4136 | ||
4137 | wait_for_completion(&ppc440spe_r6_test_comp); | |
4138 | ||
4139 | /* Now check if the test page is zeroed */ | |
4140 | a = page_address(pg); | |
4141 | if ((*(u32 *)a) == 0 && memcmp(a, a+4, PAGE_SIZE-4) == 0) { | |
4142 | /* page is zero - RAID-6 enabled */ | |
4143 | rval = 0; | |
4144 | } else { | |
4145 | /* RAID-6 was not enabled */ | |
4146 | rval = -EINVAL; | |
4147 | } | |
4148 | exit: | |
4149 | __free_page(pg); | |
4150 | return rval; | |
4151 | } | |
4152 | ||
4153 | static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev) | |
4154 | { | |
4155 | switch (adev->id) { | |
4156 | case PPC440SPE_DMA0_ID: | |
4157 | case PPC440SPE_DMA1_ID: | |
4158 | dma_cap_set(DMA_MEMCPY, adev->common.cap_mask); | |
4159 | dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask); | |
4160 | dma_cap_set(DMA_MEMSET, adev->common.cap_mask); | |
4161 | dma_cap_set(DMA_PQ, adev->common.cap_mask); | |
4162 | dma_cap_set(DMA_PQ_VAL, adev->common.cap_mask); | |
4163 | dma_cap_set(DMA_XOR_VAL, adev->common.cap_mask); | |
4164 | break; | |
4165 | case PPC440SPE_XOR_ID: | |
4166 | dma_cap_set(DMA_XOR, adev->common.cap_mask); | |
4167 | dma_cap_set(DMA_PQ, adev->common.cap_mask); | |
4168 | dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask); | |
4170 | break; | |
4171 | } | |
4172 | ||
4173 | /* Set base routines */ | |
4174 | adev->common.device_alloc_chan_resources = | |
4175 | ppc440spe_adma_alloc_chan_resources; | |
4176 | adev->common.device_free_chan_resources = | |
4177 | ppc440spe_adma_free_chan_resources; | |
4178 | adev->common.device_tx_status = ppc440spe_adma_tx_status; | |
4179 | adev->common.device_issue_pending = ppc440spe_adma_issue_pending; |
4180 | ||
4181 | /* Set prep routines based on capability */ | |
4182 | if (dma_has_cap(DMA_MEMCPY, adev->common.cap_mask)) { | |
4183 | adev->common.device_prep_dma_memcpy = | |
4184 | ppc440spe_adma_prep_dma_memcpy; | |
4185 | } | |
4186 | if (dma_has_cap(DMA_MEMSET, adev->common.cap_mask)) { | |
4187 | adev->common.device_prep_dma_memset = | |
4188 | ppc440spe_adma_prep_dma_memset; | |
4189 | } | |
4190 | if (dma_has_cap(DMA_XOR, adev->common.cap_mask)) { | |
4191 | adev->common.max_xor = XOR_MAX_OPS; | |
4192 | adev->common.device_prep_dma_xor = | |
4193 | ppc440spe_adma_prep_dma_xor; | |
4194 | } | |
4195 | if (dma_has_cap(DMA_PQ, adev->common.cap_mask)) { | |
4196 | switch (adev->id) { | |
4197 | case PPC440SPE_DMA0_ID: | |
4198 | dma_set_maxpq(&adev->common, | |
4199 | DMA0_FIFO_SIZE / sizeof(struct dma_cdb), 0); | |
4200 | break; | |
4201 | case PPC440SPE_DMA1_ID: | |
4202 | dma_set_maxpq(&adev->common, | |
4203 | DMA1_FIFO_SIZE / sizeof(struct dma_cdb), 0); | |
4204 | break; | |
4205 | case PPC440SPE_XOR_ID: | |
4206 | adev->common.max_pq = XOR_MAX_OPS * 3; | |
4207 | break; | |
4208 | } | |
4209 | adev->common.device_prep_dma_pq = | |
4210 | ppc440spe_adma_prep_dma_pq; | |
4211 | } | |
4212 | if (dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask)) { | |
4213 | switch (adev->id) { | |
4214 | case PPC440SPE_DMA0_ID: | |
4215 | adev->common.max_pq = DMA0_FIFO_SIZE / | |
4216 | sizeof(struct dma_cdb); | |
4217 | break; | |
4218 | case PPC440SPE_DMA1_ID: | |
4219 | adev->common.max_pq = DMA1_FIFO_SIZE / | |
4220 | sizeof(struct dma_cdb); | |
4221 | break; | |
4222 | } | |
4223 | adev->common.device_prep_dma_pq_val = | |
4224 | ppc440spe_adma_prep_dma_pqzero_sum; | |
4225 | } | |
4226 | if (dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask)) { | |
4227 | switch (adev->id) { | |
4228 | case PPC440SPE_DMA0_ID: | |
4229 | adev->common.max_xor = DMA0_FIFO_SIZE / | |
4230 | sizeof(struct dma_cdb); | |
4231 | break; | |
4232 | case PPC440SPE_DMA1_ID: | |
4233 | adev->common.max_xor = DMA1_FIFO_SIZE / | |
4234 | sizeof(struct dma_cdb); | |
4235 | break; | |
4236 | } | |
4237 | adev->common.device_prep_dma_xor_val = | |
4238 | ppc440spe_adma_prep_dma_xor_zero_sum; | |
4239 | } | |
4240 | if (dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask)) { | |
4241 | adev->common.device_prep_dma_interrupt = | |
4242 | ppc440spe_adma_prep_dma_interrupt; | |
4243 | } | |
4244 | pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: " | |
4245 | "( %s%s%s%s%s%s%s)\n", | |
4246 | dev_name(adev->dev), | |
4247 | dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "", | |
4248 | dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "", | |
4249 | dma_has_cap(DMA_XOR, adev->common.cap_mask) ? "xor " : "", | |
4250 | dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask) ? "xor_val " : "", | |
4251 | dma_has_cap(DMA_MEMCPY, adev->common.cap_mask) ? "memcpy " : "", | |
4252 | dma_has_cap(DMA_MEMSET, adev->common.cap_mask) ? "memset " : "", | |
4253 | dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask) ? "intr " : ""); | |
4254 | } | |
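A client wanting one of the capabilities advertised here would typically use the standard dmaengine allocator; a minimal sketch, assuming nothing beyond the generic API (example_get_pq_channel is hypothetical):

	static struct dma_chan *example_get_pq_channel(void)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_PQ, mask);	/* advertised by the routine above */

		/* no filter: accept any channel whose device supports DMA_PQ */
		return dma_request_channel(mask, NULL, NULL);
	}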
4255 | ||
4256 | static int ppc440spe_adma_setup_irqs(struct ppc440spe_adma_device *adev, | |
4257 | struct ppc440spe_adma_chan *chan, | |
4258 | int *initcode) | |
4259 | { | |
4260 | struct platform_device *ofdev; | |
4261 | struct device_node *np; |
4262 | int ret; | |
4263 | ||
4264 | ofdev = container_of(adev->dev, struct platform_device, dev); | |
4265 | np = ofdev->dev.of_node; | |
4266 | if (adev->id != PPC440SPE_XOR_ID) { |
4267 | adev->err_irq = irq_of_parse_and_map(np, 1); | |
4268 | if (adev->err_irq == NO_IRQ) { | |
4269 | dev_warn(adev->dev, "no err irq resource?\n"); | |
4270 | *initcode = PPC_ADMA_INIT_IRQ2; | |
4271 | adev->err_irq = -ENXIO; | |
4272 | } else | |
4273 | atomic_inc(&ppc440spe_adma_err_irq_ref); | |
4274 | } else { | |
4275 | adev->err_irq = -ENXIO; | |
4276 | } | |
4277 | ||
4278 | adev->irq = irq_of_parse_and_map(np, 0); | |
4279 | if (adev->irq == NO_IRQ) { | |
4280 | dev_err(adev->dev, "no irq resource\n"); | |
4281 | *initcode = PPC_ADMA_INIT_IRQ1; | |
4282 | ret = -ENXIO; | |
4283 | goto err_irq_map; | |
4284 | } | |
4285 | dev_dbg(adev->dev, "irq %d, err irq %d\n", | |
4286 | adev->irq, adev->err_irq); | |
4287 | ||
4288 | ret = request_irq(adev->irq, ppc440spe_adma_eot_handler, | |
4289 | 0, dev_driver_string(adev->dev), chan); | |
4290 | if (ret) { | |
4291 | dev_err(adev->dev, "can't request irq %d\n", | |
4292 | adev->irq); | |
4293 | *initcode = PPC_ADMA_INIT_IRQ1; | |
4294 | ret = -EIO; | |
4295 | goto err_req1; | |
4296 | } | |
4297 | ||
4298 | /* only DMA engines have a separate error IRQ | |
4299 | * so it's OK if err_irq < 0 in the XOR engine case. | |
4300 | */ | |
4301 | if (adev->err_irq > 0) { | |
4302 | /* both DMA engines share common error IRQ */ | |
4303 | ret = request_irq(adev->err_irq, | |
4304 | ppc440spe_adma_err_handler, | |
4305 | IRQF_SHARED, | |
4306 | dev_driver_string(adev->dev), | |
4307 | chan); | |
4308 | if (ret) { | |
4309 | dev_err(adev->dev, "can't request irq %d\n", | |
4310 | adev->err_irq); | |
4311 | *initcode = PPC_ADMA_INIT_IRQ2; | |
4312 | ret = -EIO; | |
4313 | goto err_req2; | |
4314 | } | |
4315 | } | |
4316 | ||
4317 | if (adev->id == PPC440SPE_XOR_ID) { | |
4318 | /* enable XOR engine interrupts */ | |
4319 | iowrite32be(XOR_IE_CBCIE_BIT | XOR_IE_ICBIE_BIT | | |
4320 | XOR_IE_ICIE_BIT | XOR_IE_RPTIE_BIT, | |
4321 | &adev->xor_reg->ier); | |
4322 | } else { | |
4323 | u32 mask, enable; | |
4324 | ||
4325 | np = of_find_compatible_node(NULL, NULL, "ibm,i2o-440spe"); | |
4326 | if (!np) { | |
4327 | pr_err("%s: can't find I2O device tree node\n", | |
4328 | __func__); | |
4329 | ret = -ENODEV; | |
4330 | goto err_req2; | |
4331 | } | |
4332 | adev->i2o_reg = of_iomap(np, 0); | |
4333 | if (!adev->i2o_reg) { | |
4334 | pr_err("%s: failed to map I2O registers\n", __func__); | |
4335 | of_node_put(np); | |
4336 | ret = -EINVAL; | |
4337 | goto err_req2; | |
4338 | } | |
4339 | of_node_put(np); | |
4340 | /* Unmask 'CS FIFO Attention' interrupts and | |
4341 | * enable generating interrupts on errors | |
4342 | */ | |
4343 | enable = (adev->id == PPC440SPE_DMA0_ID) ? | |
4344 | ~(I2O_IOPIM_P0SNE | I2O_IOPIM_P0EM) : | |
4345 | ~(I2O_IOPIM_P1SNE | I2O_IOPIM_P1EM); | |
4346 | mask = ioread32(&adev->i2o_reg->iopim) & enable; | |
4347 | iowrite32(mask, &adev->i2o_reg->iopim); | |
4348 | } | |
4349 | return 0; | |
4350 | ||
4351 | err_req2: | |
4352 | free_irq(adev->irq, chan); | |
4353 | err_req1: | |
4354 | irq_dispose_mapping(adev->irq); | |
4355 | err_irq_map: | |
4356 | if (adev->err_irq > 0) { | |
4357 | if (atomic_dec_and_test(&ppc440spe_adma_err_irq_ref)) | |
4358 | irq_dispose_mapping(adev->err_irq); | |
4359 | } | |
4360 | return ret; | |
4361 | } | |
4362 | ||
4363 | static void ppc440spe_adma_release_irqs(struct ppc440spe_adma_device *adev, | |
4364 | struct ppc440spe_adma_chan *chan) | |
4365 | { | |
4366 | u32 mask, disable; | |
4367 | ||
4368 | if (adev->id == PPC440SPE_XOR_ID) { | |
4369 | /* disable XOR engine interrupts */ | |
4370 | mask = ioread32be(&adev->xor_reg->ier); | |
4371 | mask &= ~(XOR_IE_CBCIE_BIT | XOR_IE_ICBIE_BIT | | |
4372 | XOR_IE_ICIE_BIT | XOR_IE_RPTIE_BIT); | |
4373 | iowrite32be(mask, &adev->xor_reg->ier); | |
4374 | } else { | |
4375 | /* disable DMAx engine interrupts */ | |
4376 | disable = (adev->id == PPC440SPE_DMA0_ID) ? | |
4377 | (I2O_IOPIM_P0SNE | I2O_IOPIM_P0EM) : | |
4378 | (I2O_IOPIM_P1SNE | I2O_IOPIM_P1EM); | |
4379 | mask = ioread32(&adev->i2o_reg->iopim) | disable; | |
4380 | iowrite32(mask, &adev->i2o_reg->iopim); | |
4381 | } | |
4382 | free_irq(adev->irq, chan); | |
4383 | irq_dispose_mapping(adev->irq); | |
4384 | if (adev->err_irq > 0) { | |
4385 | free_irq(adev->err_irq, chan); | |
4386 | if (atomic_dec_and_test(&ppc440spe_adma_err_irq_ref)) { | |
4387 | irq_dispose_mapping(adev->err_irq); | |
4388 | iounmap(adev->i2o_reg); | |
4389 | } | |
4390 | } | |
4391 | } | |
4392 | ||
4393 | /** | |
4394 | * ppc440spe_adma_probe - probe the asynch device | |
4395 | */ | |
4396 | static int __devinit ppc440spe_adma_probe(struct platform_device *ofdev, | |
4397 | const struct of_device_id *match) |
4398 | { | |
4399 | struct device_node *np = ofdev->dev.of_node; | |
4400 | struct resource res; |
4401 | struct ppc440spe_adma_device *adev; | |
4402 | struct ppc440spe_adma_chan *chan; | |
4403 | struct ppc_dma_chan_ref *ref, *_ref; | |
4404 | int ret = 0, initcode = PPC_ADMA_INIT_OK; | |
4405 | const u32 *idx; | |
4406 | int len; | |
4407 | void *regs; | |
4408 | u32 id, pool_size; | |
4409 | ||
4410 | if (of_device_is_compatible(np, "amcc,xor-accelerator")) { | |
4411 | id = PPC440SPE_XOR_ID; | |
4412 | /* As far as the XOR engine is concerned, it does not | |
4413 | * use FIFOs but a linked list, so the pool size to | |
4414 | * allocate does not depend on the engine configuration. | |
4415 | */ | |
4416 | pool_size = PAGE_SIZE << 1; | |
4417 | } else { | |
4418 | /* it is DMA0 or DMA1 */ | |
4419 | idx = of_get_property(np, "cell-index", &len); | |
4420 | if (!idx || (len != sizeof(u32))) { | |
4421 | dev_err(&ofdev->dev, "Device node %s has missing " | |
4422 | "or invalid cell-index property\n", | |
4423 | np->full_name); | |
4424 | return -EINVAL; | |
4425 | } | |
4426 | id = *idx; | |
4427 | /* DMA0,1 engines use a FIFO to maintain CDBs, so we | |
4428 | * should size the pool according to this FIFO. Thus, | |
4429 | * the pool size depends on the FIFO depth: the pool | |
4430 | * must provide as many CDBs as the FIFO can hold | |
4431 | * CDB pointers. | |
4432 | * That is | |
4433 | * CDB size = 32B; | |
4434 | * CDBs number = (DMA0_FIFO_SIZE >> 3); | |
4435 | * Pool size = CDBs number * CDB size = | |
4436 | * = (DMA0_FIFO_SIZE >> 3) << 5 = DMA0_FIFO_SIZE << 2. | |
4437 | */ | |
4438 | pool_size = (id == PPC440SPE_DMA0_ID) ? | |
4439 | DMA0_FIFO_SIZE : DMA1_FIFO_SIZE; | |
4440 | pool_size <<= 2; | |
4441 | } | |
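Worked example of the sizing rule above (the concrete FIFO size is hypothetical): with 8-byte CDB pointers and 32-byte CDBs, a 4 KB FIFO holds 4096 >> 3 = 512 pointers, so the pool must provide 512 CDBs, i.e. 512 * 32 B = 16 KB = 4096 << 2; hence pool_size is the FIFO size shifted left by 2.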
4442 | ||
4443 | if (of_address_to_resource(np, 0, &res)) { | |
4444 | dev_err(&ofdev->dev, "failed to get memory resource\n"); | |
4445 | initcode = PPC_ADMA_INIT_MEMRES; | |
4446 | ret = -ENODEV; | |
4447 | goto out; | |
4448 | } | |
4449 | ||
4450 | if (!request_mem_region(res.start, resource_size(&res), | |
4451 | dev_driver_string(&ofdev->dev))) { | |
4452 | dev_err(&ofdev->dev, "failed to request memory region " | |
4453 | "(0x%016llx-0x%016llx)\n", | |
4454 | (u64)res.start, (u64)res.end); | |
4455 | initcode = PPC_ADMA_INIT_MEMREG; | |
4456 | ret = -EBUSY; | |
4457 | goto out; | |
4458 | } | |
4459 | ||
4460 | /* create a device */ | |
4461 | adev = kzalloc(sizeof(*adev), GFP_KERNEL); | |
4462 | if (!adev) { | |
4463 | dev_err(&ofdev->dev, "failed to allocate device\n"); | |
4464 | initcode = PPC_ADMA_INIT_ALLOC; | |
4465 | ret = -ENOMEM; | |
4466 | goto err_adev_alloc; | |
4467 | } | |
4468 | ||
4469 | adev->id = id; | |
4470 | adev->pool_size = pool_size; | |
4471 | /* allocate coherent memory for hardware descriptors */ | |
4472 | adev->dma_desc_pool_virt = dma_alloc_coherent(&ofdev->dev, | |
4473 | adev->pool_size, &adev->dma_desc_pool, | |
4474 | GFP_KERNEL); | |
4475 | if (adev->dma_desc_pool_virt == NULL) { | |
4476 | dev_err(&ofdev->dev, "failed to allocate %d bytes of coherent " | |
4477 | "memory for hardware descriptors\n", | |
4478 | adev->pool_size); | |
4479 | initcode = PPC_ADMA_INIT_COHERENT; | |
4480 | ret = -ENOMEM; | |
4481 | goto err_dma_alloc; | |
4482 | } | |
4483 | dev_dbg(&ofdev->dev, "allocated descriptor pool virt 0x%p phys 0x%llx\n", | |
4484 | adev->dma_desc_pool_virt, (u64)adev->dma_desc_pool); | |
4485 | ||
4486 | regs = ioremap(res.start, resource_size(&res)); | |
4487 | if (!regs) { | |
4488 | dev_err(&ofdev->dev, "failed to ioremap regs!\n"); | |
     | ret = -ENOMEM; | |
4489 | goto err_regs_alloc; | |
4490 | } | |
4491 | ||
4492 | if (adev->id == PPC440SPE_XOR_ID) { | |
4493 | adev->xor_reg = regs; | |
4494 | /* Reset XOR */ | |
4495 | iowrite32be(XOR_CRSR_XASR_BIT, &adev->xor_reg->crsr); | |
4496 | iowrite32be(XOR_CRSR_64BA_BIT, &adev->xor_reg->crrr); | |
4497 | } else { | |
4498 | size_t fifo_size = (adev->id == PPC440SPE_DMA0_ID) ? | |
4499 | DMA0_FIFO_SIZE : DMA1_FIFO_SIZE; | |
4500 | adev->dma_reg = regs; | |
4501 | /* DMAx_FIFO_SIZE is defined in bytes, | |
4502 | * <fsiz> is defined as a number of CDB pointers (8 bytes each). | |
4503 | * DMA FIFO Length = CSlength + CPlength, where | |
4504 | * CSlength = CPlength = (fsiz + 1) * 8. | |
4505 | */ | |
4506 | iowrite32(DMA_FIFO_ENABLE | ((fifo_size >> 3) - 2), | |
4507 | &adev->dma_reg->fsiz); | |
4508 | /* Configure DMA engine */ | |
4509 | iowrite32(DMA_CFG_DXEPR_HP | DMA_CFG_DFMPP_HP | DMA_CFG_FALGN, | |
4510 | &adev->dma_reg->cfg); | |
4511 | /* Clear Status */ | |
4512 | iowrite32(~0, &adev->dma_reg->dsts); | |
4513 | } | |
4514 | ||
4515 | adev->dev = &ofdev->dev; | |
4516 | adev->common.dev = &ofdev->dev; | |
4517 | INIT_LIST_HEAD(&adev->common.channels); | |
4518 | dev_set_drvdata(&ofdev->dev, adev); | |
4519 | ||
4520 | /* create a channel */ | |
4521 | chan = kzalloc(sizeof(*chan), GFP_KERNEL); | |
4522 | if (!chan) { | |
4523 | dev_err(&ofdev->dev, "can't allocate channel structure\n"); | |
4524 | initcode = PPC_ADMA_INIT_CHANNEL; | |
4525 | ret = -ENOMEM; | |
4526 | goto err_chan_alloc; | |
4527 | } | |
4528 | ||
4529 | spin_lock_init(&chan->lock); | |
4530 | INIT_LIST_HEAD(&chan->chain); | |
4531 | INIT_LIST_HEAD(&chan->all_slots); | |
4532 | chan->device = adev; | |
4533 | chan->common.device = &adev->common; | |
4534 | list_add_tail(&chan->common.device_node, &adev->common.channels); | |
4535 | tasklet_init(&chan->irq_tasklet, ppc440spe_adma_tasklet, | |
4536 | (unsigned long)chan); | |
4537 | ||
4538 | /* allocate and map helper pages for async validation or | |
4539 | * async_mult/async_sum_product operations on DMA0/1. | |
4540 | */ | |
4541 | if (adev->id != PPC440SPE_XOR_ID) { | |
4542 | chan->pdest_page = alloc_page(GFP_KERNEL); | |
4543 | chan->qdest_page = alloc_page(GFP_KERNEL); | |
4544 | if (!chan->pdest_page || | |
4545 | !chan->qdest_page) { | |
4546 | if (chan->pdest_page) | |
4547 | __free_page(chan->pdest_page); | |
4548 | if (chan->qdest_page) | |
4549 | __free_page(chan->qdest_page); | |
4550 | ret = -ENOMEM; | |
4551 | goto err_page_alloc; | |
4552 | } | |
4553 | chan->pdest = dma_map_page(&ofdev->dev, chan->pdest_page, 0, | |
4554 | PAGE_SIZE, DMA_BIDIRECTIONAL); | |
4555 | chan->qdest = dma_map_page(&ofdev->dev, chan->qdest_page, 0, | |
4556 | PAGE_SIZE, DMA_BIDIRECTIONAL); | |
4557 | } | |
4558 | ||
4559 | ref = kmalloc(sizeof(*ref), GFP_KERNEL); | |
4560 | if (ref) { | |
4561 | ref->chan = &chan->common; | |
4562 | INIT_LIST_HEAD(&ref->node); | |
4563 | list_add_tail(&ref->node, &ppc440spe_adma_chan_list); | |
4564 | } else { | |
4565 | dev_err(&ofdev->dev, "failed to allocate channel reference!\n"); | |
4566 | ret = -ENOMEM; | |
4567 | goto err_ref_alloc; | |
4568 | } | |
4569 | ||
4570 | ret = ppc440spe_adma_setup_irqs(adev, chan, &initcode); | |
4571 | if (ret) | |
4572 | goto err_irq; | |
4573 | ||
4574 | ppc440spe_adma_init_capabilities(adev); | |
4575 | ||
4576 | ret = dma_async_device_register(&adev->common); | |
4577 | if (ret) { | |
4578 | initcode = PPC_ADMA_INIT_REGISTER; | |
4579 | dev_err(&ofdev->dev, "failed to register dma device\n"); | |
4580 | goto err_dev_reg; | |
4581 | } | |
4582 | ||
4583 | goto out; | |
4584 | ||
4585 | err_dev_reg: | |
4586 | ppc440spe_adma_release_irqs(adev, chan); | |
4587 | err_irq: | |
4588 | list_for_each_entry_safe(ref, _ref, &ppc440spe_adma_chan_list, node) { | |
4589 | if (chan == to_ppc440spe_adma_chan(ref->chan)) { | |
4590 | list_del(&ref->node); | |
4591 | kfree(ref); | |
4592 | } | |
4593 | } | |
4594 | err_ref_alloc: | |
4595 | if (adev->id != PPC440SPE_XOR_ID) { | |
4596 | dma_unmap_page(&ofdev->dev, chan->pdest, | |
4597 | PAGE_SIZE, DMA_BIDIRECTIONAL); | |
4598 | dma_unmap_page(&ofdev->dev, chan->qdest, | |
4599 | PAGE_SIZE, DMA_BIDIRECTIONAL); | |
4600 | __free_page(chan->pdest_page); | |
4601 | __free_page(chan->qdest_page); | |
4602 | } | |
4603 | err_page_alloc: | |
4604 | kfree(chan); | |
4605 | err_chan_alloc: | |
4606 | if (adev->id == PPC440SPE_XOR_ID) | |
4607 | iounmap(adev->xor_reg); | |
4608 | else | |
4609 | iounmap(adev->dma_reg); | |
4610 | err_regs_alloc: | |
4611 | dma_free_coherent(adev->dev, adev->pool_size, | |
4612 | adev->dma_desc_pool_virt, | |
4613 | adev->dma_desc_pool); | |
4614 | err_dma_alloc: | |
4615 | kfree(adev); | |
4616 | err_adev_alloc: | |
4617 | release_mem_region(res.start, resource_size(&res)); | |
4618 | out: | |
4619 | if (id < PPC440SPE_ADMA_ENGINES_NUM) | |
4620 | ppc440spe_adma_devices[id] = initcode; | |
4621 | ||
4622 | return ret; | |
4623 | } | |
4624 | ||
4625 | /** | |
4626 | * ppc440spe_adma_remove - remove the asynch device | |
4627 | */ | |
4628 | static int __devexit ppc440spe_adma_remove(struct platform_device *ofdev) | |
4629 | { |
4630 | struct ppc440spe_adma_device *adev = dev_get_drvdata(&ofdev->dev); | |
4631 | struct device_node *np = ofdev->dev.of_node; | |
4632 | struct resource res; |
4633 | struct dma_chan *chan, *_chan; | |
4634 | struct ppc_dma_chan_ref *ref, *_ref; | |
4635 | struct ppc440spe_adma_chan *ppc440spe_chan; | |
4636 | ||
4637 | dev_set_drvdata(&ofdev->dev, NULL); | |
4638 | if (adev->id < PPC440SPE_ADMA_ENGINES_NUM) | |
4639 | ppc440spe_adma_devices[adev->id] = -1; | |
4640 | ||
4641 | dma_async_device_unregister(&adev->common); | |
4642 | ||
4643 | list_for_each_entry_safe(chan, _chan, &adev->common.channels, | |
4644 | device_node) { | |
4645 | ppc440spe_chan = to_ppc440spe_adma_chan(chan); | |
4646 | ppc440spe_adma_release_irqs(adev, ppc440spe_chan); | |
4647 | tasklet_kill(&ppc440spe_chan->irq_tasklet); | |
4648 | if (adev->id != PPC440SPE_XOR_ID) { | |
4649 | dma_unmap_page(&ofdev->dev, ppc440spe_chan->pdest, | |
4650 | PAGE_SIZE, DMA_BIDIRECTIONAL); | |
4651 | dma_unmap_page(&ofdev->dev, ppc440spe_chan->qdest, | |
4652 | PAGE_SIZE, DMA_BIDIRECTIONAL); | |
4653 | __free_page(ppc440spe_chan->pdest_page); | |
4654 | __free_page(ppc440spe_chan->qdest_page); | |
4655 | } | |
4656 | list_for_each_entry_safe(ref, _ref, &ppc440spe_adma_chan_list, | |
4657 | node) { | |
4658 | if (ppc440spe_chan == | |
4659 | to_ppc440spe_adma_chan(ref->chan)) { | |
4660 | list_del(&ref->node); | |
4661 | kfree(ref); | |
4662 | } | |
4663 | } | |
4664 | list_del(&chan->device_node); | |
4665 | kfree(ppc440spe_chan); | |
4666 | } | |
4667 | ||
4668 | dma_free_coherent(adev->dev, adev->pool_size, | |
4669 | adev->dma_desc_pool_virt, adev->dma_desc_pool); | |
4670 | if (adev->id == PPC440SPE_XOR_ID) | |
4671 | iounmap(adev->xor_reg); | |
4672 | else | |
4673 | iounmap(adev->dma_reg); | |
4674 | of_address_to_resource(np, 0, &res); | |
4675 | release_mem_region(res.start, resource_size(&res)); | |
4676 | kfree(adev); | |
4677 | return 0; | |
4678 | } | |
4679 | ||
4680 | /* | |
4681 | * /sys driver interface to enable h/w RAID-6 capabilities | |
4682 | * Files created in e.g. /sys/devices/plb.0/400100100.dma0/driver/ | |
4683 | * directory are "devices", "enable" and "poly". | |
4684 | * "devices" shows available engines. | |
4685 | * "enable" is used to enable RAID-6 capabilities or to check | |
4686 | * whether they have been activated. | |
4687 | * "poly" allows setting/checking the polynomial in use (for PPC440SPe only). | |
4688 | */ | |
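A userspace sketch of driving these attributes; the directory is the example path from the comment above and will differ per device tree, and the key value written to "enable" is a placeholder:

	#include <stdio.h>

	int main(void)
	{
		const char *dir = "/sys/devices/plb.0/400100100.dma0/driver";
		char path[256], line[128];
		FILE *f;

		/* write the activation key (placeholder value) */
		snprintf(path, sizeof(path), "%s/enable", dir);
		f = fopen(path, "w");
		if (!f)
			return 1;
		fprintf(f, "0x12345678\n");
		fclose(f);

		/* read back whether RAID-6 is now EN/DISABLED */
		f = fopen(path, "r");
		if (f && fgets(line, sizeof(line), f))
			printf("%s", line);
		if (f)
			fclose(f);
		return 0;
	}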
4689 | ||
4690 | static ssize_t show_ppc440spe_devices(struct device_driver *dev, char *buf) | |
4691 | { | |
4692 | ssize_t size = 0; | |
4693 | int i; | |
4694 | ||
4695 | for (i = 0; i < PPC440SPE_ADMA_ENGINES_NUM; i++) { | |
4696 | if (ppc440spe_adma_devices[i] == -1) | |
4697 | continue; | |
4698 | size += snprintf(buf + size, PAGE_SIZE - size, | |
4699 | "PPC440SP(E)-ADMA.%d: %s\n", i, | |
4700 | ppc_adma_errors[ppc440spe_adma_devices[i]]); | |
4701 | } | |
4702 | return size; | |
4703 | } | |
4704 | ||
4705 | static ssize_t show_ppc440spe_r6enable(struct device_driver *dev, char *buf) | |
4706 | { | |
4707 | return snprintf(buf, PAGE_SIZE, | |
4708 | "PPC440SP(e) RAID-6 capabilities are %sABLED.\n", | |
4709 | ppc440spe_r6_enabled ? "EN" : "DIS"); | |
4710 | } | |
4711 | ||
4712 | static ssize_t store_ppc440spe_r6enable(struct device_driver *dev, | |
4713 | const char *buf, size_t count) | |
4714 | { | |
4715 | unsigned long val; | |
4716 | ||
4717 | if (!count || count > 11) | |
4718 | return -EINVAL; | |
4719 | ||
4720 | if (!ppc440spe_r6_tchan) | |
4721 | return -EFAULT; | |
4722 | ||
4723 | /* Write a key */ | |
4724 | sscanf(buf, "%lx", &val); | |
4725 | dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_XORBA, val); | |
4726 | isync(); | |
4727 | ||
4728 | /* Verify whether it really works now */ | |
4729 | if (ppc440spe_test_raid6(ppc440spe_r6_tchan) == 0) { | |
4730 | pr_info("PPC440SP(e) RAID-6 has been activated " | |
4731 | "successfully\n"); | |
4732 | ppc440spe_r6_enabled = 1; | |
4733 | } else { | |
4734 | pr_info("PPC440SP(e) RAID-6 hasn't been activated!" | |
4735 | " Wrong key?\n"); | |
4736 | ppc440spe_r6_enabled = 0; | |
4737 | } | |
4738 | return count; | |
4739 | } | |
4740 | ||
4741 | static ssize_t show_ppc440spe_r6poly(struct device_driver *dev, char *buf) | |
4742 | { | |
4743 | ssize_t size = 0; | |
4744 | u32 reg; | |
4745 | ||
4746 | #ifdef CONFIG_440SP | |
4747 | /* The 440SP has a fixed polynomial */ | |
4748 | reg = 0x4d; | |
4749 | #else | |
4750 | reg = dcr_read(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL); | |
4751 | reg >>= MQ0_CFBHL_POLY; | |
4752 | reg &= 0xFF; | |
4753 | #endif | |
4754 | ||
4755 | size = snprintf(buf, PAGE_SIZE, "PPC440SP(e) RAID-6 driver " | |
4756 | "uses 0x1%02x polynomial.\n", reg); | |
4757 | return size; | |
4758 | } | |
4759 | ||
4760 | static ssize_t store_ppc440spe_r6poly(struct device_driver *dev, | |
4761 | const char *buf, size_t count) | |
4762 | { | |
4763 | unsigned long reg, val; | |
4764 | ||
4765 | #ifdef CONFIG_440SP | |
4766 | /* The 440SP supports only the default 0x14D polynomial */ | |
4767 | return -EINVAL; | |
4768 | #endif | |
4769 | ||
4770 | if (!count || count > 6) | |
4771 | return -EINVAL; | |
4772 | ||
4773 | /* e.g., 0x14D or 0x11D */ | |
4774 | if (sscanf(buf, "%lx", &val) != 1) | |
| return -EINVAL; | |
4775 | ||
4776 | if (val & ~0x1FF) | |
4777 | return -EINVAL; | |
4778 | ||
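| /* Only the low 8 bits are programmed into MQ0_CFBHL; bit 8 (0x100) of |
| * the 9-bit polynomial is implicit, hence the "0x1%02x" format used by |
| * show_ppc440spe_r6poly() above. |
| */ |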
4779 | val &= 0xFF; | |
4780 | reg = dcr_read(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL); | |
4781 | reg &= ~(0xFF << MQ0_CFBHL_POLY); | |
4782 | reg |= val << MQ0_CFBHL_POLY; | |
4783 | dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL, reg); | |
4784 | ||
4785 | return count; | |
4786 | } | |
4787 | ||
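| /* DRIVER_ATTR(<name>, ...) defines a struct driver_attribute named |
| * driver_attr_<name>; the files are attached to the driver with |
| * driver_create_file() in ppc440spe_adma_init() below. |
| */ |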
4788 | static DRIVER_ATTR(devices, S_IRUGO, show_ppc440spe_devices, NULL); | |
4789 | static DRIVER_ATTR(enable, S_IRUGO | S_IWUSR, show_ppc440spe_r6enable, | |
4790 | store_ppc440spe_r6enable); | |
4791 | static DRIVER_ATTR(poly, S_IRUGO | S_IWUSR, show_ppc440spe_r6poly, | |
4792 | store_ppc440spe_r6poly); | |
4793 | ||
4794 | /* | |
4795 | * Common initialisation for RAID engines; allocate memory for | |
4796 | * DMAx FIFOs, perform configuration common for all DMA engines. | |
4797 | * Further DMA engine specific configuration is done at probe time. | |
4798 | */ | |
4799 | static int ppc440spe_configure_raid_devices(void) | |
4800 | { | |
4801 | struct device_node *np; | |
4802 | struct resource i2o_res; | |
4803 | struct i2o_regs __iomem *i2o_reg; | |
4804 | dcr_host_t i2o_dcr_host; | |
4805 | unsigned int dcr_base, dcr_len; | |
4806 | int i, ret; | |
4807 | ||
4808 | np = of_find_compatible_node(NULL, NULL, "ibm,i2o-440spe"); | |
4809 | if (!np) { | |
4810 | pr_err("%s: can't find I2O device tree node\n", | |
4811 | __func__); | |
4812 | return -ENODEV; | |
4813 | } | |
4814 | ||
4815 | if (of_address_to_resource(np, 0, &i2o_res)) { | |
4816 | of_node_put(np); | |
4817 | return -EINVAL; | |
4818 | } | |
4819 | ||
4820 | i2o_reg = of_iomap(np, 0); | |
4821 | if (!i2o_reg) { | |
4822 | pr_err("%s: failed to map I2O registers\n", __func__); | |
4823 | of_node_put(np); | |
4824 | return -EINVAL; | |
4825 | } | |
4826 | ||
4827 | /* Get I2O DCRs base */ | |
4828 | dcr_base = dcr_resource_start(np, 0); | |
4829 | dcr_len = dcr_resource_len(np, 0); | |
4830 | if (!dcr_base && !dcr_len) { | |
4831 | pr_err("%s: can't get DCR registers base/len!\n", | |
4832 | np->full_name); | |
4833 | of_node_put(np); | |
4834 | iounmap(i2o_reg); | |
4835 | return -ENODEV; | |
4836 | } | |
4837 | ||
4838 | i2o_dcr_host = dcr_map(np, dcr_base, dcr_len); | |
4839 | if (!DCR_MAP_OK(i2o_dcr_host)) { | |
4840 | pr_err("%s: failed to map DCRs!\n", np->full_name); | |
4841 | of_node_put(np); | |
4842 | iounmap(i2o_reg); | |
4843 | return -ENODEV; | |
4844 | } | |
4845 | of_node_put(np); | |
4846 | ||
4847 | /* Provide memory regions for the DMA FIFOs: I2O, DMA0 and DMA1 share | |
4848 | * the base address of the FIFO memory space. | |
4849 | * We actually need twice as much physical memory as is programmed into | |
4850 | * the <fsiz> register, because each DMA engine has two FIFOs (CP and CS). | |
4851 | */ | |
4852 | ppc440spe_dma_fifo_buf = kmalloc((DMA0_FIFO_SIZE + DMA1_FIFO_SIZE) << 1, | |
4853 | GFP_KERNEL); | |
4854 | if (!ppc440spe_dma_fifo_buf) { | |
4855 | pr_err("%s: DMA FIFO buffer allocation failed.\n", __func__); | |
4856 | iounmap(i2o_reg); | |
4857 | dcr_unmap(i2o_dcr_host, dcr_len); | |
4858 | return -ENOMEM; | |
4859 | } | |
4860 | ||
4861 | /* | |
4862 | * Configure h/w | |
4863 | */ | |
4864 | /* Reset I2O/DMA: assert the reset bit, then release it */ | |
4865 | mtdcri(SDR0, DCRN_SDR0_SRST, DCRN_SDR0_SRST_I2ODMA); | |
4866 | mtdcri(SDR0, DCRN_SDR0_SRST, 0); | |
4867 | ||
4868 | /* Set up the base address of the memory-mapped registers; the | |
| * IBAL write below also sets the I2O_REG_ENABLE bit. */ | |
4869 | dcr_write(i2o_dcr_host, DCRN_I2O0_IBAH, (u32)(i2o_res.start >> 32)); | |
4870 | dcr_write(i2o_dcr_host, DCRN_I2O0_IBAL, (u32)(i2o_res.start) | | |
4871 | I2O_REG_ENABLE); | |
4872 | dcr_unmap(i2o_dcr_host, dcr_len); | |
4873 | ||
4874 | /* Setup FIFO memory space base address */ | |
4875 | iowrite32(0, &i2o_reg->ifbah); | |
4876 | iowrite32(((u32)__pa(ppc440spe_dma_fifo_buf)), &i2o_reg->ifbal); | |
4877 | ||
4878 | /* Set a zero FIFO size for I2O, so that the whole | |
4879 | * ppc440spe_dma_fifo_buf is used by the DMAs. | |
4880 | * The DMAx FIFOs are configured at probe time. | |
4881 | */ | |
4882 | iowrite32(0, &i2o_reg->ifsiz); | |
4883 | iounmap(i2o_reg); | |
4884 | ||
4885 | /* To prepare the WXOR/RXOR functionality we need access to the | |
4886 | * Memory Queue Module DCRs (it will eventually be enabled | |
4887 | * via the /sys interface of the ppc440spe ADMA driver). | |
4888 | */ | |
4889 | np = of_find_compatible_node(NULL, NULL, "ibm,mq-440spe"); | |
4890 | if (!np) { | |
4891 | pr_err("%s: can't find MQ device tree node\n", | |
4892 | __func__); | |
4893 | ret = -ENODEV; | |
4894 | goto out_free; | |
4895 | } | |
4896 | ||
4897 | /* Get MQ DCRs base */ | |
4898 | dcr_base = dcr_resource_start(np, 0); | |
4899 | dcr_len = dcr_resource_len(np, 0); | |
4900 | if (!dcr_base && !dcr_len) { | |
4901 | pr_err("%s: can't get DCR registers base/len!\n", | |
4902 | np->full_name); | |
4903 | ret = -ENODEV; | |
4904 | goto out_mq; | |
4905 | } | |
4906 | ||
4907 | ppc440spe_mq_dcr_host = dcr_map(np, dcr_base, dcr_len); | |
4908 | if (!DCR_MAP_OK(ppc440spe_mq_dcr_host)) { | |
4909 | pr_err("%s: failed to map DCRs!\n", np->full_name); | |
4910 | ret = -ENODEV; | |
4911 | goto out_mq; | |
4912 | } | |
4913 | of_node_put(np); | |
4914 | ppc440spe_mq_dcr_len = dcr_len; | |
4915 | ||
4916 | /* Set HB alias */ | |
4917 | dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_BAUH, DMA_CUED_XOR_HB); | |
4918 | ||
4919 | /* Set: | |
4920 | * - LL transaction passing limit to 1; | |
4921 | * - Memory controller cycle limit to 1; | |
4922 | * - Galois Polynomial to 0x14d (default) | |
4923 | */ | |
4924 | dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL, | |
4925 | (1 << MQ0_CFBHL_TPLM) | (1 << MQ0_CFBHL_HBCL) | | |
4926 | (PPC440SPE_DEFAULT_POLY << MQ0_CFBHL_POLY)); | |
4927 | ||
4928 | atomic_set(&ppc440spe_adma_err_irq_ref, 0); | |
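| /* Mark every engine slot as "not probed"; probe fills in the real |
| * per-engine init status reported via the "devices" sysfs file. |
| */ |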
4929 | for (i = 0; i < PPC440SPE_ADMA_ENGINES_NUM; i++) | |
4930 | ppc440spe_adma_devices[i] = -1; | |
4931 | ||
4932 | return 0; | |
4933 | ||
4934 | out_mq: | |
4935 | of_node_put(np); | |
4936 | out_free: | |
4937 | kfree(ppc440spe_dma_fifo_buf); | |
4938 | return ret; | |
4939 | } | |
4940 | ||
4b1cf1fa | 4941 | static const struct of_device_id ppc440spe_adma_of_match[] __devinitconst = { |
4942 | { .compatible = "ibm,dma-440spe", }, |
4943 | { .compatible = "amcc,xor-accelerator", }, | |
4944 | {}, | |
4945 | }; | |
4946 | MODULE_DEVICE_TABLE(of, ppc440spe_adma_of_match); | |
4947 | ||
4948 | static struct of_platform_driver ppc440spe_adma_driver = { | |
4949 | .probe = ppc440spe_adma_probe, |
4950 | .remove = __devexit_p(ppc440spe_adma_remove), | |
4951 | .driver = { | |
4952 | .name = "PPC440SP(E)-ADMA", | |
4953 | .owner = THIS_MODULE, | |
4018294b | 4954 | .of_match_table = ppc440spe_adma_of_match, |
4955 | }, |
4956 | }; | |
4957 | ||
4958 | static __init int ppc440spe_adma_init(void) | |
4959 | { | |
4960 | int ret; | |
4961 | ||
4962 | ret = ppc440spe_configure_raid_devices(); | |
4963 | if (ret) | |
4964 | return ret; | |
4965 | ||
4966 | ret = of_register_platform_driver(&ppc440spe_adma_driver); | |
4967 | if (ret) { | |
4968 | pr_err("%s: failed to register platform driver\n", | |
4969 | __func__); | |
4970 | goto out_reg; | |
4971 | } | |
4972 | ||
4973 | /* Initialization status */ | |
4974 | ret = driver_create_file(&ppc440spe_adma_driver.driver, | |
4975 | &driver_attr_devices); | |
4976 | if (ret) | |
4977 | goto out_dev; | |
4978 | ||
4979 | /* RAID-6 h/w enable entry */ | |
4980 | ret = driver_create_file(&ppc440spe_adma_driver.driver, | |
4981 | &driver_attr_enable); | |
4982 | if (ret) | |
4983 | goto out_en; | |
4984 | ||
4985 | /* GF polynomial to use */ | |
4986 | ret = driver_create_file(&ppc440spe_adma_driver.driver, | |
4987 | &driver_attr_poly); | |
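| /* On success all three sysfs files exist; otherwise fall through |
| * and unwind whatever was created so far. |
| */ |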
4988 | if (!ret) | |
4989 | return ret; | |
4990 | ||
4991 | driver_remove_file(&ppc440spe_adma_driver.driver, | |
4992 | &driver_attr_enable); | |
4993 | out_en: | |
4994 | driver_remove_file(&ppc440spe_adma_driver.driver, | |
4995 | &driver_attr_devices); | |
4996 | out_dev: | |
4997 | /* User will not be able to enable h/w RAID-6 */ | |
4998 | pr_err("%s: failed to create RAID-6 driver interface\n", | |
4999 | __func__); | |
5000 | of_unregister_platform_driver(&ppc440spe_adma_driver); | |
5001 | out_reg: | |
5002 | dcr_unmap(ppc440spe_mq_dcr_host, ppc440spe_mq_dcr_len); | |
5003 | kfree(ppc440spe_dma_fifo_buf); | |
5004 | return ret; | |
5005 | } | |
5006 | ||
5007 | static void __exit ppc440spe_adma_exit(void) | |
5008 | { | |
5009 | driver_remove_file(&ppc440spe_adma_driver.driver, | |
5010 | &driver_attr_poly); | |
5011 | driver_remove_file(&ppc440spe_adma_driver.driver, | |
5012 | &driver_attr_enable); | |
5013 | driver_remove_file(&ppc440spe_adma_driver.driver, | |
5014 | &driver_attr_devices); | |
5015 | of_unregister_platform_driver(&ppc440spe_adma_driver); | |
5016 | dcr_unmap(ppc440spe_mq_dcr_host, ppc440spe_mq_dcr_len); | |
5017 | kfree(ppc440spe_dma_fifo_buf); | |
5018 | } | |
5019 | ||
5020 | arch_initcall(ppc440spe_adma_init); | |
5021 | module_exit(ppc440spe_adma_exit); | |
5022 | ||
5023 | MODULE_AUTHOR("Yuri Tikhonov <yur@emcraft.com>"); | |
5024 | MODULE_DESCRIPTION("PPC440SPE ADMA Engine Driver"); | |
5025 | MODULE_LICENSE("GPL"); |