/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan) \
	container_of(chan, struct mv_xor_chan, common)

#define to_mv_xor_slot(tx) \
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan) \
	((chan)->device->common.dev)

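/*
 * A note on the bare (1 << 31) constants used in mv_desc_init() below:
 * in the XOR engine's descriptor format, bit 31 of the status word marks
 * the descriptor as owned by the DMA engine, and bit 31 of the command
 * word is the end-of-descriptor interrupt enable.
 */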
static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = (1 << 31);
	hw_desc->phy_next_desc = 0;
	hw_desc->desc_command = (1 << 31);
}

static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_dest_addr;
}

static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
				int src_idx)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_src_addr[src_idx];
}


static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
				   u32 byte_count)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_next_desc = 0;
}

static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
{
	desc->value = val;
}

static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
				  dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_dest_addr = addr;
}

static int mv_chan_memset_slot_count(size_t len)
{
	return 1;
}

#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[index] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return __raw_readl(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	__raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
{
	__raw_writel(desc_addr, XOR_DEST_POINTER(chan));
}

static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
{
	__raw_writel(block_size, XOR_BLOCK_SIZE(chan));
}

static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
{
	__raw_writel(value, XOR_INIT_VALUE_LOW(chan));
	__raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
}

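/*
 * The interrupt cause and mask registers are shared by the channels of
 * an XOR engine, with 16 bits allocated per channel, which is why the
 * helpers below shift by (chan->idx * 16).
 */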
static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = __raw_readl(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	__raw_writel(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static int mv_is_err_intr(u32 intr_cause)
{
	if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
		return 1;

	return 0;
}

static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val = ~(1 << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	__raw_writel(val, XOR_INTR_CAUSE(chan));
}

static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	__raw_writel(val, XOR_INTR_CAUSE(chan));
}

static int mv_can_chain(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc_slot *chain_old_tail = list_entry(
		desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);

	if (chain_old_tail->type != desc->type)
		return 0;
	if (desc->type == DMA_MEMSET)
		return 0;

	return 1;
}

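/*
 * The low three bits of the channel configuration register select the
 * operation mode; mv_set_mode() rewrites only those bits and leaves the
 * rest of the register untouched.
 */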
static void mv_set_mode(struct mv_xor_chan *chan,
			enum dma_transaction_type type)
{
	u32 op_mode;
	u32 config = __raw_readl(XOR_CONFIG(chan));

	switch (type) {
	case DMA_XOR:
		op_mode = XOR_OPERATION_MODE_XOR;
		break;
	case DMA_MEMCPY:
		op_mode = XOR_OPERATION_MODE_MEMCPY;
		break;
	case DMA_MEMSET:
		op_mode = XOR_OPERATION_MODE_MEMSET;
		break;
	default:
		dev_err(mv_chan_to_devp(chan),
			"error: unsupported operation %d.\n",
			type);
		BUG();
		return;
	}

	config &= ~0x7;
	config |= op_mode;
	__raw_writel(config, XOR_CONFIG(chan));
	chan->current_type = type;
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	u32 activation;

	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
	activation = __raw_readl(XOR_ACTIVATION(chan));
	activation |= 0x1;
	__raw_writel(activation, XOR_ACTIVATION(chan));
}

static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = __raw_readl(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

static int mv_chan_xor_slot_count(size_t len, int src_cnt)
{
	return 1;
}

/**
 * mv_xor_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
			      struct mv_xor_desc_slot *slot)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
		__func__, __LINE__, slot);

	slot->slots_per_op = 0;

}

/*
 * mv_xor_start_new_chain - program the engine to operate on new chain headed by
 * sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
				   struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);
	if (sw_desc->type != mv_chan->current_type)
		mv_set_mode(mv_chan, sw_desc->type);

	if (sw_desc->type == DMA_MEMSET) {
		/* for memset requests we need to program the engine, no
		 * descriptors used.
		 */
		struct mv_xor_desc *hw_desc = sw_desc->hw_desc;
		mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
		mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
		mv_chan_set_value(mv_chan, sw_desc->value);
	} else {
		/* set the hardware chain */
		mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
	}
	mv_chan->pending += sw_desc->slot_cnt;
	mv_xor_issue_pending(&mv_chan->common);
}

static dma_cookie_t
mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
	struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		/* unmap dma addresses
		 * (unmap_single vs unmap_page?)
		 */
		if (desc->group_head && desc->unmap_len) {
			struct mv_xor_desc_slot *unmap = desc->group_head;
			struct device *dev = mv_chan_to_devp(mv_chan);
			u32 len = unmap->unmap_len;
			enum dma_ctrl_flags flags = desc->async_tx.flags;
			u32 src_cnt;
			dma_addr_t addr;
			dma_addr_t dest;

			src_cnt = unmap->unmap_src_cnt;
			dest = mv_desc_get_dest_addr(unmap);
			if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
				enum dma_data_direction dir;

				if (src_cnt > 1) /* is xor ? */
					dir = DMA_BIDIRECTIONAL;
				else
					dir = DMA_FROM_DEVICE;
				dma_unmap_page(dev, dest, len, dir);
			}

			if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
				while (src_cnt--) {
					addr = mv_desc_get_src_addr(unmap,
								    src_cnt);
					if (addr == dest)
						continue;
					dma_unmap_page(dev, addr, len,
						       DMA_TO_DEVICE);
				}
			}
			desc->group_head = NULL;
		}
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_del(&iter->completed_node);
			mv_xor_free_slots(mv_chan, iter);
		}
	}
	return 0;
}

static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
	struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);
	list_del(&desc->chain_node);
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
		return 0;
	}

	mv_xor_free_slots(mv_chan, desc);
	return 0;
}

static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int seen_current = 0;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_xor_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (iter->async_tx.phys == current_desc) {
			seen_current = 1;
			if (busy)
				break;
		}

		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);

		if (mv_xor_clean_slot(iter, mv_chan))
			break;
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		struct mv_xor_desc_slot *chain_head;
		chain_head = list_entry(mv_chan->chain.next,
					struct mv_xor_desc_slot,
					chain_node);

		mv_xor_start_new_chain(mv_chan, chain_head);
	}

	if (cookie > 0)
		mv_chan->common.completed_cookie = cookie;
}

static void
mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	spin_lock_bh(&mv_chan->lock);
	__mv_xor_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
	mv_xor_slot_cleanup(chan);
}

static struct mv_xor_desc_slot *
mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
		   int slots_per_op)
{
	struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
	LIST_HEAD(chain);
	int slots_found, retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation can not be found start searching
	 * from the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = mv_chan->last_used;
	else
		iter = list_entry(&mv_chan->all_slots,
				  struct mv_xor_desc_slot,
				  slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;

			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++)
			alloc_start = iter;

		if (slots_found == num_slots) {
			struct mv_xor_desc_slot *alloc_tail = NULL;
			struct mv_xor_desc_slot *last_used = NULL;
			iter = alloc_start;
			while (num_slots) {
				int i;

				/* pre-ack all but the last descriptor */
				async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
						struct mv_xor_desc_slot,
						slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->tx_list);
			mv_chan->last_used = last_used;
			mv_desc_clear_next_desc(alloc_start);
			mv_desc_clear_next_desc(alloc_tail);
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

/************************ DMA engine API functions ****************************/
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *grp_start, *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	grp_start = sw_desc->group_head;

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    chain_node);
		list_splice_init(&grp_start->tx_list,
				 &old_chain_tail->chain_node);

		if (!mv_can_chain(grp_start))
			goto submit_done;

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n",
			old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_xor_start_new_chain(mv_chan, grp_start);

submit_done:
	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	char *hw_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = mv_chan->device->pool_size/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			printk(KERN_INFO "MV XOR Channel only initialized"
				" %d descriptor slots", idx);
			break;
		}
		hw_desc = (char *) mv_chan->device->dma_desc_pool_virt;
		slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		INIT_LIST_HEAD(&slot->tx_list);
		hw_desc = (char *) mv_chan->device->dma_desc_pool;
		slot->async_tx.phys =
			(dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->slot_node, &mv_chan->all_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	if (mv_chan->slots_allocated && !mv_chan->last_used)
		mv_chan->last_used = list_entry(mv_chan->all_slots.next,
						struct mv_xor_desc_slot,
						slot_node);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots last_used: %p\n",
		mv_chan->slots_allocated, mv_chan->last_used);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s dest: %x src %x len: %u flags: %ld\n",
		__func__, dest, src, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memcpy_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMCPY;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_src_addr(grp_start, 0, src);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
		       size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s dest: %x len: %u flags: %ld\n",
		__func__, dest, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memset_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMSET;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_block_fill_val(grp_start, value);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p \n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: dest %x %u flags: %ld\n",
		__func__, src_cnt, len, dest, flags);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		/* the byte count field is the same as in memcpy desc */
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		while (src_cnt--)
			mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p \n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	mv_xor_slot_cleanup(mv_chan);

	spin_lock_bh(&mv_chan->lock);
	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {
		in_use_descs++;
		list_del(&iter->completed_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}
	mv_chan->last_used = NULL;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transactions state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS) {
		mv_xor_clean_completed_slots(mv_chan);
		return ret;
	}
	mv_xor_slot_cleanup(mv_chan);

	return dma_cookie_status(chan, cookie, txstate);
}

static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = __raw_readl(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan),
		"config 0x%08x.\n", val);

	val = __raw_readl(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan),
		"activation 0x%08x.\n", val);

	val = __raw_readl(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan),
		"intr cause 0x%08x.\n", val);

	val = __raw_readl(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan),
		"intr mask 0x%08x.\n", val);

	val = __raw_readl(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan),
		"error cause 0x%08x.\n", val);

	val = __raw_readl(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan),
		"error addr 0x%08x.\n", val);
}

static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
					 u32 intr_cause)
{
	if (intr_cause & (1 << 4)) {
		dev_dbg(mv_chan_to_devp(chan),
			"ignore this error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan),
		"error on chan %d. intr cause 0x%08x.\n",
		chan->idx, intr_cause);

	mv_dump_xor_regs(chan);
	BUG();
}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (mv_is_err_intr(intr_cause))
		mv_xor_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_xor_device_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

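/*
 * Descriptor submission is batched: the channel is only (re)activated
 * once at least MV_XOR_THRESHOLD descriptors are pending.
 * mv_xor_start_new_chain() bumps the pending count and then calls back
 * into mv_xor_issue_pending().
 */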
static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */
#define MV_XOR_TEST_SIZE 2000

static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
{
	int i;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	int err = 0;

	src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < MV_XOR_TEST_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	dest_dma = dma_map_single(dma_chan->device->dev, dest,
				  MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);

	src_dma = dma_map_single(dma_chan->device->dev, src,
				 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    MV_XOR_TEST_SIZE, 0);
	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_SUCCESS) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int __devinit
mv_xor_xor_self_test(struct mv_xor_device *device)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
				DMA_FROM_DEVICE);

	for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					   0, PAGE_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);

	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_SUCCESS) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling."
				" index %d, data %x, expected %x\n", i,
				ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = MV_XOR_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int mv_xor_channel_remove(struct mv_xor_device *device)
{
	struct dma_chan *chan, *_chan;
	struct mv_xor_chan *mv_chan;
	struct device *dev = device->common.dev;

	dma_async_device_unregister(&device->common);

	dma_free_coherent(dev, device->pool_size,
			  device->dma_desc_pool_virt, device->dma_desc_pool);

	list_for_each_entry_safe(chan, _chan, &device->common.channels,
				 device_node) {
		mv_chan = to_mv_xor_chan(chan);
		list_del(&chan->device_node);
	}

	return 0;
}

static struct mv_xor_device *
mv_xor_channel_add(struct mv_xor_private *msp,
		   struct platform_device *pdev,
		   int hw_id, dma_cap_mask_t cap_mask,
		   size_t pool_size, int irq)
{
	int ret = 0;
	struct mv_xor_device *adev;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;

	adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return ERR_PTR(-ENOMEM);

	dma_dev = &adev->common;

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	adev->pool_size = pool_size;
	adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
							  adev->pool_size,
							  &adev->dma_desc_pool,
							  GFP_KERNEL);
	if (!adev->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = cap_mask;
	adev->shared = msp;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	mv_chan->device = adev;
	mv_chan->idx = hw_id;
	mv_chan->mmr_base = adev->shared->xor_base;

	if (!mv_chan->mmr_base) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_xor_device_clear_err_status(mv_chan);

	ret = devm_request_irq(&pdev->dev, irq,
			       mv_xor_interrupt_handler,
			       0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	mv_set_mode(mv_chan, DMA_MEMCPY);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->all_slots);
	mv_chan->common.device = dma_dev;
	dma_cookie_init(&mv_chan->common);

	list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_xor_memcpy_self_test(adev);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_dma;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_xor_xor_self_test(adev);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_dma;
	}

	dev_info(&pdev->dev, "Marvell XOR: "
		 "( %s%s%s%s)\n",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	return adev;

 err_free_dma:
	dma_free_coherent(&pdev->dev, pool_size,
			  adev->dma_desc_pool_virt, adev->dma_desc_pool);
	return ERR_PTR(ret);
}

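/*
 * Mirror the CPU's MBus DRAM layout into the XOR engine's address
 * decoding windows so that descriptor and buffer addresses in DRAM are
 * reachable by the engine; unused windows (and the remap registers of
 * windows 0-3) are cleared first.
 */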
static void
mv_xor_conf_mbus_windows(struct mv_xor_private *msp,
			 const struct mbus_dram_target_info *dram)
{
	void __iomem *base = msp->xor_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
}

static int mv_xor_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_private *msp;
	struct mv_xor_platform_data *pdata = pdev->dev.platform_data;
	struct resource *res;
	int i, ret;

	dev_notice(&pdev->dev, "Marvell XOR driver\n");

	msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
	if (!msp)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	msp->xor_base = devm_ioremap(&pdev->dev, res->start,
				     resource_size(res));
	if (!msp->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	msp->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					  resource_size(res));
	if (!msp->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, msp);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(msp, dram);

	/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
	 */
	msp->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(msp->clk))
		clk_prepare_enable(msp->clk);

	if (pdata && pdata->channels) {
		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
			struct mv_xor_channel_data *cd;
			int irq;

			cd = &pdata->channels[i];
			if (!cd) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			irq = platform_get_irq(pdev, i);
			if (irq < 0) {
				ret = irq;
				goto err_channel_add;
			}

			msp->channels[i] =
				mv_xor_channel_add(msp, pdev, cd->hw_id,
						   cd->cap_mask,
						   cd->pool_size, irq);
			if (IS_ERR(msp->channels[i])) {
				ret = PTR_ERR(msp->channels[i]);
				goto err_channel_add;
			}
		}
	}

	return 0;

err_channel_add:
	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (msp->channels[i])
			mv_xor_channel_remove(msp->channels[i]);

	clk_disable_unprepare(msp->clk);
	clk_put(msp->clk);
	return ret;
}

static int mv_xor_remove(struct platform_device *pdev)
{
	struct mv_xor_private *msp = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		if (msp->channels[i])
			mv_xor_channel_remove(msp->channels[i]);
	}

	if (!IS_ERR(msp->clk)) {
		clk_disable_unprepare(msp->clk);
		clk_put(msp->clk);
	}

	return 0;
}

static struct platform_driver mv_xor_driver = {
	.probe = mv_xor_probe,
	.remove = mv_xor_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = MV_XOR_NAME,
	},
};


static int __init mv_xor_init(void)
{
	return platform_driver_register(&mv_xor_driver);
}
module_init(mv_xor_init);

/* it's currently unsafe to unload this module */
#if 0
static void __exit mv_xor_exit(void)
{
	platform_driver_unregister(&mv_xor_driver);
	return;
}

module_exit(mv_xor_exit);
#endif

MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");