/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <plat/mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, common)

#define to_mv_xor_device(dev)		\
	container_of(dev, struct mv_xor_device, common)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

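/*
 * Reset a hardware descriptor for reuse: set bit 31 of the status and
 * command words and clear the next-descriptor pointer so the descriptor
 * starts out unchained.
 */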
static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = (1 << 31);
	hw_desc->phy_next_desc = 0;
	hw_desc->desc_command = (1 << 31);
}

static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_dest_addr;
}

static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
				int src_idx)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_src_addr[src_idx];
}


static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
				   u32 byte_count)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_next_desc = 0;
}

static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
{
	desc->value = val;
}

static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
				  dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_dest_addr = addr;
}

static int mv_chan_memset_slot_count(size_t len)
{
	return 1;
}

#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[index] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

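/*
 * Thin wrappers around __raw_readl()/__raw_writel() for the per-channel
 * XOR engine registers.
 */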
static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return __raw_readl(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	__raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
{
	__raw_writel(desc_addr, XOR_DEST_POINTER(chan));
}

static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
{
	__raw_writel(block_size, XOR_BLOCK_SIZE(chan));
}

static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
{
	__raw_writel(value, XOR_INIT_VALUE_LOW(chan));
	__raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = __raw_readl(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	__raw_writel(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static int mv_is_err_intr(u32 intr_cause)
{
	if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
		return 1;

	return 0;
}

static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val = ~(1 << (chan->idx * 16));
	dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
	__raw_writel(val, XOR_INTR_CAUSE(chan));
}

static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	__raw_writel(val, XOR_INTR_CAUSE(chan));
}

static int mv_can_chain(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc_slot *chain_old_tail = list_entry(
		desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);

	if (chain_old_tail->type != desc->type)
		return 0;
	if (desc->type == DMA_MEMSET)
		return 0;

	return 1;
}

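/*
 * Select the channel's operating mode (XOR, memcpy or memset) by
 * rewriting the low three bits of the configuration register.
 */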
static void mv_set_mode(struct mv_xor_chan *chan,
			enum dma_transaction_type type)
{
	u32 op_mode;
	u32 config = __raw_readl(XOR_CONFIG(chan));

	switch (type) {
	case DMA_XOR:
		op_mode = XOR_OPERATION_MODE_XOR;
		break;
	case DMA_MEMCPY:
		op_mode = XOR_OPERATION_MODE_MEMCPY;
		break;
	case DMA_MEMSET:
		op_mode = XOR_OPERATION_MODE_MEMSET;
		break;
	default:
		dev_printk(KERN_ERR, chan->device->common.dev,
			   "error: unsupported operation %d.\n",
			   type);
		BUG();
		return;
	}

	config &= ~0x7;
	config |= op_mode;
	__raw_writel(config, XOR_CONFIG(chan));
	chan->current_type = type;
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	u32 activation;

	dev_dbg(chan->device->common.dev, " activate chan.\n");
	activation = __raw_readl(XOR_ACTIVATION(chan));
	activation |= 0x1;
	__raw_writel(activation, XOR_ACTIVATION(chan));
}

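/*
 * The channel state field lives in bits [5:4] of the activation register;
 * the channel is treated as busy while that field reads 1.
 */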
static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = __raw_readl(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

static int mv_chan_xor_slot_count(size_t len, int src_cnt)
{
	return 1;
}

/**
 * mv_xor_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
			      struct mv_xor_desc_slot *slot)
{
	dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n",
		__func__, __LINE__, slot);

	slot->slots_per_op = 0;

}

/*
 * mv_xor_start_new_chain - program the engine to operate on new chain headed by
 * sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
				   struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);
	if (sw_desc->type != mv_chan->current_type)
		mv_set_mode(mv_chan, sw_desc->type);

	if (sw_desc->type == DMA_MEMSET) {
		/* for memset requests we need to program the engine, no
		 * descriptors used.
		 */
		struct mv_xor_desc *hw_desc = sw_desc->hw_desc;
		mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
		mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
		mv_chan_set_value(mv_chan, sw_desc->value);
	} else {
		/* set the hardware chain */
		mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
	}
	mv_chan->pending += sw_desc->slot_cnt;
	mv_xor_issue_pending(&mv_chan->common);
}

static dma_cookie_t
mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
	struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		/* unmap dma addresses
		 * (unmap_single vs unmap_page?)
		 */
		if (desc->group_head && desc->unmap_len) {
			struct mv_xor_desc_slot *unmap = desc->group_head;
			struct device *dev =
				&mv_chan->device->pdev->dev;
			u32 len = unmap->unmap_len;
			enum dma_ctrl_flags flags = desc->async_tx.flags;
			u32 src_cnt;
			dma_addr_t addr;
			dma_addr_t dest;

			src_cnt = unmap->unmap_src_cnt;
			dest = mv_desc_get_dest_addr(unmap);
			if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
				enum dma_data_direction dir;

				if (src_cnt > 1) /* is xor ? */
					dir = DMA_BIDIRECTIONAL;
				else
					dir = DMA_FROM_DEVICE;
				dma_unmap_page(dev, dest, len, dir);
			}

			if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
				while (src_cnt--) {
					addr = mv_desc_get_src_addr(unmap,
								    src_cnt);
					if (addr == dest)
						continue;
					dma_unmap_page(dev, addr, len,
						       DMA_TO_DEVICE);
				}
			}
			desc->group_head = NULL;
		}
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_del(&iter->completed_node);
			mv_xor_free_slots(mv_chan, iter);
		}
	}
	return 0;
}

static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
	struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);
	list_del(&desc->chain_node);
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
		return 0;
	}

	mv_xor_free_slots(mv_chan, desc);
	return 0;
}

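/*
 * Walk the descriptor chain, run completion actions for every descriptor
 * the hardware has finished with, and restart the channel if work is
 * still queued. Caller must hold &mv_chan->lock.
 */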
static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int seen_current = 0;

	dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc);
	mv_xor_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (iter->async_tx.phys == current_desc) {
			seen_current = 1;
			if (busy)
				break;
		}

		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);

		if (mv_xor_clean_slot(iter, mv_chan))
			break;
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		struct mv_xor_desc_slot *chain_head;
		chain_head = list_entry(mv_chan->chain.next,
					struct mv_xor_desc_slot,
					chain_node);

		mv_xor_start_new_chain(mv_chan, chain_head);
	}

	if (cookie > 0)
		mv_chan->common.completed_cookie = cookie;
}

static void
mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	spin_lock_bh(&mv_chan->lock);
	__mv_xor_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
	mv_xor_slot_cleanup(chan);
}

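/*
 * Grab a run of num_slots free descriptor slots from the channel's pool,
 * starting the search just after the last allocation. Returns the tail
 * slot of the allocation, or NULL (after kicking the cleanup tasklet)
 * if no contiguous run is currently free.
 */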
static struct mv_xor_desc_slot *
mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
		   int slots_per_op)
{
	struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
	LIST_HEAD(chain);
	int slots_found, retry = 0;

	/* start the search from the last allocated descriptor;
	 * if a contiguous allocation cannot be found, start searching
	 * from the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = mv_chan->last_used;
	else
		iter = list_entry(&mv_chan->all_slots,
				  struct mv_xor_desc_slot,
				  slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;

			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++)
			alloc_start = iter;

		if (slots_found == num_slots) {
			struct mv_xor_desc_slot *alloc_tail = NULL;
			struct mv_xor_desc_slot *last_used = NULL;
			iter = alloc_start;
			while (num_slots) {
				int i;

				/* pre-ack all but the last descriptor */
				async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
						struct mv_xor_desc_slot,
						slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->tx_list);
			mv_chan->last_used = last_used;
			mv_desc_clear_next_desc(alloc_start);
			mv_desc_clear_next_desc(alloc_tail);
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

static dma_cookie_t
mv_desc_assign_cookie(struct mv_xor_chan *mv_chan,
		      struct mv_xor_desc_slot *desc)
{
	dma_cookie_t cookie = mv_chan->common.cookie;

	if (++cookie < 0)
		cookie = 1;
	mv_chan->common.cookie = desc->async_tx.cookie = cookie;
	return cookie;
}

/************************ DMA engine API functions ****************************/
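/*
 * Assign a cookie to the descriptor and splice it onto the channel's
 * software chain. If possible the descriptor is appended to the running
 * hardware chain by patching the old tail's next pointer; otherwise (or
 * if the engine already consumed the old tail) a new hardware chain is
 * started.
 */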
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *grp_start, *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	grp_start = sw_desc->group_head;

	spin_lock_bh(&mv_chan->lock);
	cookie = mv_desc_assign_cookie(mv_chan, sw_desc);

	if (list_empty(&mv_chan->chain))
		list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    chain_node);
		list_splice_init(&grp_start->tx_list,
				 &old_chain_tail->chain_node);

		if (!mv_can_chain(grp_start))
			goto submit_done;

		dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n",
			old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_xor_start_new_chain(mv_chan, grp_start);

submit_done:
	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	char *hw_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	struct mv_xor_platform_data *plat_data =
		mv_chan->device->pdev->dev.platform_data;
	int num_descs_in_pool = plat_data->pool_size/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			printk(KERN_INFO "MV XOR Channel only initialized"
				" %d descriptor slots", idx);
			break;
		}
		hw_desc = (char *) mv_chan->device->dma_desc_pool_virt;
		slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		INIT_LIST_HEAD(&slot->tx_list);
		hw_desc = (char *) mv_chan->device->dma_desc_pool;
		slot->async_tx.phys =
			(dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->slot_node, &mv_chan->all_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	if (mv_chan->slots_allocated && !mv_chan->last_used)
		mv_chan->last_used = list_entry(mv_chan->all_slots.next,
						struct mv_xor_desc_slot,
						slot_node);

	dev_dbg(mv_chan->device->common.dev,
		"allocated %d descriptor slots last_used: %p\n",
		mv_chan->slots_allocated, mv_chan->last_used);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

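/*
 * Descriptor preparation. All prep routines reject transfers shorter than
 * MV_XOR_MIN_BYTE_COUNT and BUG on transfers longer than
 * MV_XOR_MAX_BYTE_COUNT; each operation fits in a single descriptor slot.
 */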
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan->device->common.dev,
		"%s dest: %x src %x len: %u flags: %ld\n",
		__func__, dest, src, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memcpy_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMCPY;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_src_addr(grp_start, 0, src);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);

	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
		       size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan->device->common.dev,
		"%s dest: %x len: %u flags: %ld\n",
		__func__, dest, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memset_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMSET;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_block_fill_val(grp_start, value);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p async_tx %p \n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan->device->common.dev,
		"%s src_cnt: %d len: %u dest %x flags: %ld\n",
		__func__, src_cnt, len, dest, flags);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		/* the byte count field is the same as in memcpy desc*/
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		while (src_cnt--)
			mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p async_tx %p \n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	mv_xor_slot_cleanup(mv_chan);

	spin_lock_bh(&mv_chan->lock);
	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {
		in_use_descs++;
		list_del(&iter->completed_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}
	mv_chan->last_used = NULL;

	dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan->device->common.dev,
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transactions state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;

	last_used = chan->cookie;
	last_complete = chan->completed_cookie;
	dma_set_tx_state(txstate, last_complete, last_used, 0);

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS) {
		mv_xor_clean_completed_slots(mv_chan);
		return ret;
	}
	mv_xor_slot_cleanup(mv_chan);

	last_used = chan->cookie;
	last_complete = chan->completed_cookie;

	dma_set_tx_state(txstate, last_complete, last_used, 0);
	return dma_async_is_complete(cookie, last_complete, last_used);
}

static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = __raw_readl(XOR_CONFIG(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "config 0x%08x.\n", val);

	val = __raw_readl(XOR_ACTIVATION(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "activation 0x%08x.\n", val);

	val = __raw_readl(XOR_INTR_CAUSE(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "intr cause 0x%08x.\n", val);

	val = __raw_readl(XOR_INTR_MASK(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "intr mask 0x%08x.\n", val);

	val = __raw_readl(XOR_ERROR_CAUSE(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "error cause 0x%08x.\n", val);

	val = __raw_readl(XOR_ERROR_ADDR(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "error addr 0x%08x.\n", val);
}

static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
					 u32 intr_cause)
{
	if (intr_cause & (1 << 4)) {
		dev_dbg(chan->device->common.dev,
			"ignore this error\n");
		return;
	}

	dev_printk(KERN_ERR, chan->device->common.dev,
		   "error on chan %d. intr cause 0x%08x.\n",
		   chan->idx, intr_cause);

	mv_dump_xor_regs(chan);
	BUG();
}

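/*
 * Top-half interrupt handler: report error interrupts, defer descriptor
 * cleanup to the per-channel tasklet and acknowledge the end-of-chain/
 * end-of-descriptor cause bits.
 */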
static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause);

	if (mv_is_err_intr(intr_cause))
		mv_xor_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_xor_device_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

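/*
 * Channel activation is batched: the engine is only kicked once at least
 * MV_XOR_THRESHOLD descriptors have been queued since the last kick.
 */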
static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */
#define MV_XOR_TEST_SIZE 2000

static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
{
	int i;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	int err = 0;
	struct mv_xor_chan *mv_chan;

	src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < MV_XOR_TEST_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	dest_dma = dma_map_single(dma_chan->device->dev, dest,
				  MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);

	src_dma = dma_map_single(dma_chan->device->dev, src,
				 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    MV_XOR_TEST_SIZE, 0);
	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			   "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_chan = to_mv_xor_chan(dma_chan);
	dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
				MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			   "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int __devinit
mv_xor_xor_self_test(struct mv_xor_device *device)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	struct mv_xor_chan *mv_chan;

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
				DMA_FROM_DEVICE);

	for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					   0, PAGE_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);

	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			   "Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_chan = to_mv_xor_chan(dma_chan);
	dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_printk(KERN_ERR, dma_chan->device->dev,
				   "Self-test xor failed compare, disabling."
				   " index %d, data %x, expected %x\n", i,
				   ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = MV_XOR_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int __devexit mv_xor_remove(struct platform_device *dev)
{
	struct mv_xor_device *device = platform_get_drvdata(dev);
	struct dma_chan *chan, *_chan;
	struct mv_xor_chan *mv_chan;
	struct mv_xor_platform_data *plat_data = dev->dev.platform_data;

	dma_async_device_unregister(&device->common);

	dma_free_coherent(&dev->dev, plat_data->pool_size,
			device->dma_desc_pool_virt, device->dma_desc_pool);

	list_for_each_entry_safe(chan, _chan, &device->common.channels,
				 device_node) {
		mv_chan = to_mv_xor_chan(chan);
		list_del(&chan->device_node);
	}

	return 0;
}

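/*
 * Per-channel probe: allocate the descriptor pool, register the dmaengine
 * callbacks that match the advertised capabilities, hook up the interrupt
 * and run the self tests before registering the channel.
 */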
static int __devinit mv_xor_probe(struct platform_device *pdev)
{
	int ret = 0;
	int irq;
	struct mv_xor_device *adev;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;
	struct mv_xor_platform_data *plat_data = pdev->dev.platform_data;


	adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;

	dma_dev = &adev->common;

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
							  plat_data->pool_size,
							  &adev->dma_desc_pool,
							  GFP_KERNEL);
	if (!adev->dma_desc_pool_virt)
		return -ENOMEM;

	adev->id = plat_data->hw_id;

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = plat_data->cap_mask;
	adev->pdev = pdev;
	platform_set_drvdata(pdev, adev);

	adev->shared = platform_get_drvdata(plat_data->shared);

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	mv_chan->device = adev;
	mv_chan->idx = plat_data->hw_id;
	mv_chan->mmr_base = adev->shared->xor_base;

	if (!mv_chan->mmr_base) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_xor_device_clear_err_status(mv_chan);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err_free_dma;
	}
	ret = devm_request_irq(&pdev->dev, irq,
			       mv_xor_interrupt_handler,
			       0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	mv_set_mode(mv_chan, DMA_MEMCPY);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->all_slots);
	mv_chan->common.device = dma_dev;

	list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_xor_memcpy_self_test(adev);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_dma;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_xor_xor_self_test(adev);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_dma;
	}

	dev_printk(KERN_INFO, &pdev->dev, "Marvell XOR: "
		   "( %s%s%s%s)\n",
		   dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		   dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
		   dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		   dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	goto out;

 err_free_dma:
	dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
			  adev->dma_desc_pool_virt, adev->dma_desc_pool);
 out:
	return ret;
}

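/*
 * Program the XOR engine's MBUS address decoding windows to match the
 * DRAM chip-select layout provided by the platform and enable them in
 * both WINDOW_BAR_ENABLE registers.
 */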
static void
mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
			 const struct mbus_dram_target_info *dram)
{
	void __iomem *base = msp->xor_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
}

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.remove		= __devexit_p(mv_xor_remove),
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= MV_XOR_NAME,
	},
};

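/*
 * The "shared" device owns the XOR block's register ranges: it maps the
 * two memory resources and (re)programs the MBUS windows if the platform
 * provides DRAM target info.
 */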
static int mv_xor_shared_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_shared_private *msp;
	struct resource *res;

	dev_printk(KERN_NOTICE, &pdev->dev, "Marvell shared XOR driver\n");

	msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
	if (!msp)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	msp->xor_base = devm_ioremap(&pdev->dev, res->start,
				     resource_size(res));
	if (!msp->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	msp->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					  resource_size(res));
	if (!msp->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, msp);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(msp, dram);

	return 0;
}

static int mv_xor_shared_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver mv_xor_shared_driver = {
	.probe		= mv_xor_shared_probe,
	.remove		= mv_xor_shared_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= MV_XOR_SHARED_NAME,
	},
};


static int __init mv_xor_init(void)
{
	int rc;

	rc = platform_driver_register(&mv_xor_shared_driver);
	if (!rc) {
		rc = platform_driver_register(&mv_xor_driver);
		if (rc)
			platform_driver_unregister(&mv_xor_shared_driver);
	}
	return rc;
}
module_init(mv_xor_init);

/* it's currently unsafe to unload this module */
#if 0
static void __exit mv_xor_exit(void)
{
	platform_driver_unregister(&mv_xor_driver);
	platform_driver_unregister(&mv_xor_shared_driver);
	return;
}

module_exit(mv_xor_exit);
#endif

MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");