drivers/dma/mv_xor.c
/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)            \
        container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)              \
        container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)           \
        ((chan)->dmadev.dev)

static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
{
        struct mv_xor_desc *hw_desc = desc->hw_desc;

        hw_desc->status = (1 << 31);
        hw_desc->phy_next_desc = 0;
        hw_desc->desc_command = (1 << 31);
}

static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
{
        struct mv_xor_desc *hw_desc = desc->hw_desc;
        return hw_desc->phy_dest_addr;
}

static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
                                int src_idx)
{
        struct mv_xor_desc *hw_desc = desc->hw_desc;
        return hw_desc->phy_src_addr[mv_phy_src_idx(src_idx)];
}

static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
                                   u32 byte_count)
{
        struct mv_xor_desc *hw_desc = desc->hw_desc;
        hw_desc->byte_count = byte_count;
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
                                  u32 next_desc_addr)
{
        struct mv_xor_desc *hw_desc = desc->hw_desc;
        BUG_ON(hw_desc->phy_next_desc);
        hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
{
        struct mv_xor_desc *hw_desc = desc->hw_desc;
        hw_desc->phy_next_desc = 0;
}

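/*
 * The mv_desc_* helpers here poke individual fields of the hardware
 * descriptor, struct mv_xor_desc, declared in mv_xor.h.  A sketch of
 * the fields this file relies on (field order, padding and the
 * endian-dependent layout are defined by mv_xor.h, not this comment):
 *
 *      struct mv_xor_desc {
 *              u32 status;             // bit 31: descriptor owned by HW
 *              u32 desc_command;       // bit 31 + per-source enable bits
 *              u32 phy_next_desc;      // bus address of next descriptor
 *              u32 byte_count;         // transfer length in bytes
 *              u32 phy_dest_addr;      // bus address of the destination
 *              u32 phy_src_addr[8];    // source bus addresses, indexed
 *                                      // through mv_phy_src_idx()
 *              ...
 *      };
 */
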
static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
                                  dma_addr_t addr)
{
        struct mv_xor_desc *hw_desc = desc->hw_desc;
        hw_desc->phy_dest_addr = addr;
}

static int mv_chan_memset_slot_count(size_t len)
{
        return 1;
}

#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
                                 int index, dma_addr_t addr)
{
        struct mv_xor_desc *hw_desc = desc->hw_desc;
        hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
        if (desc->type == DMA_XOR)
                hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
        return readl_relaxed(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
                                        u32 next_desc_addr)
{
        writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
        u32 val = readl_relaxed(XOR_INTR_MASK(chan));
        val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
        writel_relaxed(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
        u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
        intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
        return intr_cause;
}

static int mv_is_err_intr(u32 intr_cause)
{
        if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
                return 1;

        return 0;
}

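/*
 * The (chan->idx * 16) shifts above reflect how the hardware packs two
 * channels' interrupt bits into one shared cause/mask register pair,
 * 16 bits per channel.  Within each 16-bit slice, bits 4-9 (the mask
 * tested by mv_is_err_intr()) report error conditions; the low bits
 * report ordinary end-of-descriptor/end-of-chain events.
 */
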
static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
        u32 val = ~(1 << (chan->idx * 16));
        dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
        writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
        u32 val = 0xFFFF0000 >> (chan->idx * 16);
        writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static int mv_can_chain(struct mv_xor_desc_slot *desc)
{
        struct mv_xor_desc_slot *chain_old_tail = list_entry(
                desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);

        if (chain_old_tail->type != desc->type)
                return 0;

        return 1;
}

static void mv_set_mode(struct mv_xor_chan *chan,
                        enum dma_transaction_type type)
{
        u32 op_mode;
        u32 config = readl_relaxed(XOR_CONFIG(chan));

        switch (type) {
        case DMA_XOR:
                op_mode = XOR_OPERATION_MODE_XOR;
                break;
        case DMA_MEMCPY:
                op_mode = XOR_OPERATION_MODE_MEMCPY;
                break;
        default:
                dev_err(mv_chan_to_devp(chan),
                        "error: unsupported operation %d\n",
                        type);
                BUG();
                return;
        }

        config &= ~0x7;
        config |= op_mode;

#if defined(__BIG_ENDIAN)
        config |= XOR_DESCRIPTOR_SWAP;
#else
        config &= ~XOR_DESCRIPTOR_SWAP;
#endif

        writel_relaxed(config, XOR_CONFIG(chan));
        chan->current_type = type;
}

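/*
 * The low three bits of XOR_CONFIG select the operation, hence the
 * "config &= ~0x7" above.  XOR_DESCRIPTOR_SWAP makes the engine
 * byte-swap descriptors so that the u32 descriptor fields written by a
 * big-endian CPU are read back correctly by the little-endian DMA unit.
 */
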
static void mv_chan_activate(struct mv_xor_chan *chan)
{
        u32 activation;

        dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
        activation = readl_relaxed(XOR_ACTIVATION(chan));
        activation |= 0x1;
        writel_relaxed(activation, XOR_ACTIVATION(chan));
}

static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
        u32 state = readl_relaxed(XOR_ACTIVATION(chan));

        state = (state >> 4) & 0x3;

        return (state == 1) ? 1 : 0;
}

static int mv_chan_xor_slot_count(size_t len, int src_cnt)
{
        return 1;
}

/**
 * mv_xor_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
                              struct mv_xor_desc_slot *slot)
{
        dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
                __func__, __LINE__, slot);

        slot->slots_per_op = 0;
}

/*
 * mv_xor_start_new_chain - program the engine to operate on new chain headed by
 * sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
                                   struct mv_xor_desc_slot *sw_desc)
{
        dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
                __func__, __LINE__, sw_desc);
        if (sw_desc->type != mv_chan->current_type)
                mv_set_mode(mv_chan, sw_desc->type);

        /* set the hardware chain */
        mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);

        mv_chan->pending += sw_desc->slot_cnt;
        mv_xor_issue_pending(&mv_chan->dmachan);
}

static dma_cookie_t
mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
        struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
{
        BUG_ON(desc->async_tx.cookie < 0);

        if (desc->async_tx.cookie > 0) {
                cookie = desc->async_tx.cookie;

                /* call the callback (must not sleep or submit new
                 * operations to this channel)
                 */
                if (desc->async_tx.callback)
                        desc->async_tx.callback(
                                desc->async_tx.callback_param);

                /* unmap dma addresses
                 * (unmap_single vs unmap_page?)
                 */
                if (desc->group_head && desc->unmap_len) {
                        struct mv_xor_desc_slot *unmap = desc->group_head;
                        struct device *dev = mv_chan_to_devp(mv_chan);
                        u32 len = unmap->unmap_len;
                        enum dma_ctrl_flags flags = desc->async_tx.flags;
                        u32 src_cnt;
                        dma_addr_t addr;
                        dma_addr_t dest;

                        src_cnt = unmap->unmap_src_cnt;
                        dest = mv_desc_get_dest_addr(unmap);
                        if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
                                enum dma_data_direction dir;

                                if (src_cnt > 1) /* is xor ? */
                                        dir = DMA_BIDIRECTIONAL;
                                else
                                        dir = DMA_FROM_DEVICE;
                                dma_unmap_page(dev, dest, len, dir);
                        }

                        if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
                                while (src_cnt--) {
                                        addr = mv_desc_get_src_addr(unmap,
                                                                    src_cnt);
                                        if (addr == dest)
                                                continue;
                                        dma_unmap_page(dev, addr, len,
                                                       DMA_TO_DEVICE);
                                }
                        }
                        desc->group_head = NULL;
                }
        }

        /* run dependent operations */
        dma_run_dependencies(&desc->async_tx);

        return cookie;
}

static int
mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
        struct mv_xor_desc_slot *iter, *_iter;

        dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
        list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
                                 completed_node) {

                if (async_tx_test_ack(&iter->async_tx)) {
                        list_del(&iter->completed_node);
                        mv_xor_free_slots(mv_chan, iter);
                }
        }
        return 0;
}

static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
        struct mv_xor_chan *mv_chan)
{
        dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
                __func__, __LINE__, desc, desc->async_tx.flags);
        list_del(&desc->chain_node);
        /* the client is allowed to attach dependent operations
         * until 'ack' is set
         */
        if (!async_tx_test_ack(&desc->async_tx)) {
                /* move this slot to the completed_slots */
                list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
                return 0;
        }

        mv_xor_free_slots(mv_chan, desc);
        return 0;
}

static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
        struct mv_xor_desc_slot *iter, *_iter;
        dma_cookie_t cookie = 0;
        int busy = mv_chan_is_busy(mv_chan);
        u32 current_desc = mv_chan_get_current_desc(mv_chan);
        int seen_current = 0;

        dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
        dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
        mv_xor_clean_completed_slots(mv_chan);

        /* free completed slots from the chain starting with
         * the oldest descriptor
         */

        list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
                                 chain_node) {
                prefetch(_iter);
                prefetch(&_iter->async_tx);

                /* do not advance past the current descriptor loaded into the
                 * hardware channel, subsequent descriptors are either in
                 * process or have not been submitted
                 */
                if (seen_current)
                        break;

                /* stop the search if we reach the current descriptor and the
                 * channel is busy
                 */
                if (iter->async_tx.phys == current_desc) {
                        seen_current = 1;
                        if (busy)
                                break;
                }

                cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);

                if (mv_xor_clean_slot(iter, mv_chan))
                        break;
        }

        if ((busy == 0) && !list_empty(&mv_chan->chain)) {
                struct mv_xor_desc_slot *chain_head;
                chain_head = list_entry(mv_chan->chain.next,
                                        struct mv_xor_desc_slot,
                                        chain_node);

                mv_xor_start_new_chain(mv_chan, chain_head);
        }

        if (cookie > 0)
                mv_chan->dmachan.completed_cookie = cookie;
}

static void
mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
        spin_lock_bh(&mv_chan->lock);
        __mv_xor_slot_cleanup(mv_chan);
        spin_unlock_bh(&mv_chan->lock);
}

static void mv_xor_tasklet(unsigned long data)
{
        struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
        mv_xor_slot_cleanup(chan);
}

static struct mv_xor_desc_slot *
mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
                   int slots_per_op)
{
        struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
        LIST_HEAD(chain);
        int slots_found, retry = 0;

        /* start search from the last allocated descriptor
         * if a contiguous allocation can not be found start searching
         * from the beginning of the list
         */
retry:
        slots_found = 0;
        if (retry == 0)
                iter = mv_chan->last_used;
        else
                iter = list_entry(&mv_chan->all_slots,
                        struct mv_xor_desc_slot,
                        slot_node);

        list_for_each_entry_safe_continue(
                iter, _iter, &mv_chan->all_slots, slot_node) {
                prefetch(_iter);
                prefetch(&_iter->async_tx);
                if (iter->slots_per_op) {
                        /* give up after finding the first busy slot
                         * on the second pass through the list
                         */
                        if (retry)
                                break;

                        slots_found = 0;
                        continue;
                }

                /* start the allocation if the slot is correctly aligned */
                if (!slots_found++)
                        alloc_start = iter;

                if (slots_found == num_slots) {
                        struct mv_xor_desc_slot *alloc_tail = NULL;
                        struct mv_xor_desc_slot *last_used = NULL;
                        iter = alloc_start;
                        while (num_slots) {
                                int i;

                                /* pre-ack all but the last descriptor */
                                async_tx_ack(&iter->async_tx);

                                list_add_tail(&iter->chain_node, &chain);
                                alloc_tail = iter;
                                iter->async_tx.cookie = 0;
                                iter->slot_cnt = num_slots;
                                iter->xor_check_result = NULL;
                                for (i = 0; i < slots_per_op; i++) {
                                        iter->slots_per_op = slots_per_op - i;
                                        last_used = iter;
                                        iter = list_entry(iter->slot_node.next,
                                                struct mv_xor_desc_slot,
                                                slot_node);
                                }
                                num_slots -= slots_per_op;
                        }
                        alloc_tail->group_head = alloc_start;
                        alloc_tail->async_tx.cookie = -EBUSY;
                        list_splice(&chain, &alloc_tail->tx_list);
                        mv_chan->last_used = last_used;
                        mv_desc_clear_next_desc(alloc_start);
                        mv_desc_clear_next_desc(alloc_tail);
                        return alloc_tail;
                }
        }
        if (!retry++)
                goto retry;

        /* try to free some slots if the allocation fails */
        tasklet_schedule(&mv_chan->irq_tasklet);

        return NULL;
}

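/*
 * The allocator above is a first-fit scan over the channel's slot
 * list: pass one resumes at last_used, pass two restarts from the
 * head and gives up at the first busy slot.  Note that both
 * mv_chan_xor_slot_count() and mv_chan_memcpy_slot_count() always
 * return 1 in this driver, so num_slots and slots_per_op are 1 in
 * practice and the inner bookkeeping loop runs exactly once.
 */
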
/************************ DMA engine API functions ****************************/
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
        struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
        struct mv_xor_desc_slot *grp_start, *old_chain_tail;
        dma_cookie_t cookie;
        int new_hw_chain = 1;

        dev_dbg(mv_chan_to_devp(mv_chan),
                "%s sw_desc %p: async_tx %p\n",
                __func__, sw_desc, &sw_desc->async_tx);

        grp_start = sw_desc->group_head;

        spin_lock_bh(&mv_chan->lock);
        cookie = dma_cookie_assign(tx);

        if (list_empty(&mv_chan->chain))
                list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
        else {
                new_hw_chain = 0;

                old_chain_tail = list_entry(mv_chan->chain.prev,
                                            struct mv_xor_desc_slot,
                                            chain_node);
                list_splice_init(&grp_start->tx_list,
                                 &old_chain_tail->chain_node);

                if (!mv_can_chain(grp_start))
                        goto submit_done;

                dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n",
                        old_chain_tail->async_tx.phys);

                /* fix up the hardware chain */
                mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);

                /* if the channel is not busy */
                if (!mv_chan_is_busy(mv_chan)) {
                        u32 current_desc = mv_chan_get_current_desc(mv_chan);
                        /*
                         * and the current desc is the end of the chain before
                         * the append, then we need to start the channel
                         */
                        if (current_desc == old_chain_tail->async_tx.phys)
                                new_hw_chain = 1;
                }
        }

        if (new_hw_chain)
                mv_xor_start_new_chain(mv_chan, grp_start);

submit_done:
        spin_unlock_bh(&mv_chan->lock);

        return cookie;
}

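/*
 * Note on the chaining logic above: tx_submit only restarts the engine
 * when it has to -- either the software chain was empty, or the idle
 * hardware had already stopped on the old chain tail before the new
 * descriptor was linked in.  In every other case the append done by
 * mv_desc_set_next_desc() is enough and the running channel fetches
 * the new descriptor on its own.
 */
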
/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
        char *hw_desc;
        int idx;
        struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
        struct mv_xor_desc_slot *slot = NULL;
        int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

        /* Allocate descriptor slots */
        idx = mv_chan->slots_allocated;
        while (idx < num_descs_in_pool) {
                slot = kzalloc(sizeof(*slot), GFP_KERNEL);
                if (!slot) {
                        printk(KERN_INFO "MV XOR Channel only initialized"
                                " %d descriptor slots", idx);
                        break;
                }
                hw_desc = (char *) mv_chan->dma_desc_pool_virt;
                slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];

                dma_async_tx_descriptor_init(&slot->async_tx, chan);
                slot->async_tx.tx_submit = mv_xor_tx_submit;
                INIT_LIST_HEAD(&slot->chain_node);
                INIT_LIST_HEAD(&slot->slot_node);
                INIT_LIST_HEAD(&slot->tx_list);
                hw_desc = (char *) mv_chan->dma_desc_pool;
                slot->async_tx.phys =
                        (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
                slot->idx = idx++;

                spin_lock_bh(&mv_chan->lock);
                mv_chan->slots_allocated = idx;
                list_add_tail(&slot->slot_node, &mv_chan->all_slots);
                spin_unlock_bh(&mv_chan->lock);
        }

        if (mv_chan->slots_allocated && !mv_chan->last_used)
                mv_chan->last_used = list_entry(mv_chan->all_slots.next,
                                        struct mv_xor_desc_slot,
                                        slot_node);

        dev_dbg(mv_chan_to_devp(mv_chan),
                "allocated %d descriptor slots last_used: %p\n",
                mv_chan->slots_allocated, mv_chan->last_used);

        return mv_chan->slots_allocated ? : -ENOMEM;
}

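/*
 * num_descs_in_pool above is simply the number of hardware descriptors
 * that fit in the coherent pool allocated in mv_xor_channel_add(),
 * i.e. MV_XOR_POOL_SIZE / MV_XOR_SLOT_SIZE.  With the values mv_xor.h
 * uses in this era (a PAGE_SIZE pool of 64-byte slots, assumed here),
 * that is 64 descriptors per channel on a 4 KiB page.
 */
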
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                size_t len, unsigned long flags)
{
        struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
        struct mv_xor_desc_slot *sw_desc, *grp_start;
        int slot_cnt;

        dev_dbg(mv_chan_to_devp(mv_chan),
                "%s dest: %x src %x len: %u flags: %ld\n",
                __func__, dest, src, len, flags);
        if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
                return NULL;

        BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

        spin_lock_bh(&mv_chan->lock);
        slot_cnt = mv_chan_memcpy_slot_count(len);
        sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
        if (sw_desc) {
                sw_desc->type = DMA_MEMCPY;
                sw_desc->async_tx.flags = flags;
                grp_start = sw_desc->group_head;
                mv_desc_init(grp_start, flags);
                mv_desc_set_byte_count(grp_start, len);
                mv_desc_set_dest_addr(sw_desc->group_head, dest);
                mv_desc_set_src_addr(grp_start, 0, src);
                sw_desc->unmap_src_cnt = 1;
                sw_desc->unmap_len = len;
        }
        spin_unlock_bh(&mv_chan->lock);

        dev_dbg(mv_chan_to_devp(mv_chan),
                "%s sw_desc %p async_tx %p\n",
                __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);

        return sw_desc ? &sw_desc->async_tx : NULL;
}

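/*
 * Illustrative sketch (not part of the driver): how a dmaengine client
 * of this kernel era could exercise the memcpy path above through the
 * generic API.  Error handling is omitted, src_dma/dest_dma/len are
 * assumed to be already DMA-mapped, and the client is assumed to have
 * called dmaengine_get() beforehand:
 *
 *      struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
 *      struct dma_async_tx_descriptor *tx;
 *      dma_cookie_t cookie;
 *
 *      tx = chan->device->device_prep_dma_memcpy(chan, dest_dma,
 *                                                src_dma, len, 0);
 *      cookie = tx->tx_submit(tx);             // mv_xor_tx_submit()
 *      dma_async_issue_pending(chan);          // mv_xor_issue_pending()
 *      while (dma_async_is_tx_complete(chan, cookie, NULL, NULL)
 *             != DMA_SUCCESS)
 *              cpu_relax();
 */
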
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
                    unsigned int src_cnt, size_t len, unsigned long flags)
{
        struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
        struct mv_xor_desc_slot *sw_desc, *grp_start;
        int slot_cnt;

        if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
                return NULL;

        BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

        dev_dbg(mv_chan_to_devp(mv_chan),
                "%s src_cnt: %d len: %u dest %x flags: %ld\n",
                __func__, src_cnt, len, dest, flags);

        spin_lock_bh(&mv_chan->lock);
        slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
        sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
        if (sw_desc) {
                sw_desc->type = DMA_XOR;
                sw_desc->async_tx.flags = flags;
                grp_start = sw_desc->group_head;
                mv_desc_init(grp_start, flags);
                /* the byte count field is the same as in memcpy desc */
                mv_desc_set_byte_count(grp_start, len);
                mv_desc_set_dest_addr(sw_desc->group_head, dest);
                sw_desc->unmap_src_cnt = src_cnt;
                sw_desc->unmap_len = len;
                while (src_cnt--)
                        mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
        }
        spin_unlock_bh(&mv_chan->lock);
        dev_dbg(mv_chan_to_devp(mv_chan),
                "%s sw_desc %p async_tx %p\n",
                __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
        return sw_desc ? &sw_desc->async_tx : NULL;
}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
        struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
        struct mv_xor_desc_slot *iter, *_iter;
        int in_use_descs = 0;

        mv_xor_slot_cleanup(mv_chan);

        spin_lock_bh(&mv_chan->lock);
        list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
                                 chain_node) {
                in_use_descs++;
                list_del(&iter->chain_node);
        }
        list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
                                 completed_node) {
                in_use_descs++;
                list_del(&iter->completed_node);
        }
        list_for_each_entry_safe_reverse(
                iter, _iter, &mv_chan->all_slots, slot_node) {
                list_del(&iter->slot_node);
                kfree(iter);
                mv_chan->slots_allocated--;
        }
        mv_chan->last_used = NULL;

        dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
                __func__, mv_chan->slots_allocated);
        spin_unlock_bh(&mv_chan->lock);

        if (in_use_descs)
                dev_err(mv_chan_to_devp(mv_chan),
                        "freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transactions state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
                                     dma_cookie_t cookie,
                                     struct dma_tx_state *txstate)
{
        struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
        enum dma_status ret;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_SUCCESS) {
                mv_xor_clean_completed_slots(mv_chan);
                return ret;
        }
        mv_xor_slot_cleanup(mv_chan);

        return dma_cookie_status(chan, cookie, txstate);
}

static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
        u32 val;

        val = readl_relaxed(XOR_CONFIG(chan));
        dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);

        val = readl_relaxed(XOR_ACTIVATION(chan));
        dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);

        val = readl_relaxed(XOR_INTR_CAUSE(chan));
        dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);

        val = readl_relaxed(XOR_INTR_MASK(chan));
        dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);

        val = readl_relaxed(XOR_ERROR_CAUSE(chan));
        dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);

        val = readl_relaxed(XOR_ERROR_ADDR(chan));
        dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
}

static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
                                         u32 intr_cause)
{
        if (intr_cause & (1 << 4)) {
                dev_dbg(mv_chan_to_devp(chan),
                        "ignore this error\n");
                return;
        }

        dev_err(mv_chan_to_devp(chan),
                "error on chan %d. intr cause 0x%08x\n",
                chan->idx, intr_cause);

        mv_dump_xor_regs(chan);
        BUG();
}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
        struct mv_xor_chan *chan = data;
        u32 intr_cause = mv_chan_get_intr_cause(chan);

        dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

        if (mv_is_err_intr(intr_cause))
                mv_xor_err_interrupt_handler(chan, intr_cause);

        tasklet_schedule(&chan->irq_tasklet);

        mv_xor_device_clear_eoc_cause(chan);

        return IRQ_HANDLED;
}

static void mv_xor_issue_pending(struct dma_chan *chan)
{
        struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

        if (mv_chan->pending >= MV_XOR_THRESHOLD) {
                mv_chan->pending = 0;
                mv_chan_activate(mv_chan);
        }
}

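/*
 * The engine is only (re)activated once ->pending reaches
 * MV_XOR_THRESHOLD.  mv_xor_start_new_chain() bumps ->pending and
 * calls this function itself, so with the threshold of 1 that
 * mv_xor.h defines (assumed here), a fresh chain is kicked off
 * immediately at submit time.
 */
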
/*
 * Perform a transaction to verify the HW works.
 */
#define MV_XOR_TEST_SIZE 2000

static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
        int i;
        void *src, *dest;
        dma_addr_t src_dma, dest_dma;
        struct dma_chan *dma_chan;
        dma_cookie_t cookie;
        struct dma_async_tx_descriptor *tx;
        int err = 0;

        src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
        if (!src)
                return -ENOMEM;

        dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
        if (!dest) {
                kfree(src);
                return -ENOMEM;
        }

        /* Fill in src buffer */
        for (i = 0; i < MV_XOR_TEST_SIZE; i++)
                ((u8 *) src)[i] = (u8)i;

        dma_chan = &mv_chan->dmachan;
        if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
                err = -ENODEV;
                goto out;
        }

        dest_dma = dma_map_single(dma_chan->device->dev, dest,
                                  MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);

        src_dma = dma_map_single(dma_chan->device->dev, src,
                                 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);

        tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
                                    MV_XOR_TEST_SIZE, 0);
        cookie = mv_xor_tx_submit(tx);
        mv_xor_issue_pending(dma_chan);
        async_tx_ack(tx);
        msleep(1);

        if (mv_xor_status(dma_chan, cookie, NULL) !=
            DMA_SUCCESS) {
                dev_err(dma_chan->device->dev,
                        "Self-test copy timed out, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

        dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
                                MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
        if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
                dev_err(dma_chan->device->dev,
                        "Self-test copy failed compare, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

free_resources:
        mv_xor_free_chan_resources(dma_chan);
out:
        kfree(src);
        kfree(dest);
        return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
{
        int i, src_idx;
        struct page *dest;
        struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
        dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
        dma_addr_t dest_dma;
        struct dma_async_tx_descriptor *tx;
        struct dma_chan *dma_chan;
        dma_cookie_t cookie;
        u8 cmp_byte = 0;
        u32 cmp_word;
        int err = 0;

        for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
                xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
                if (!xor_srcs[src_idx]) {
                        while (src_idx--)
                                __free_page(xor_srcs[src_idx]);
                        return -ENOMEM;
                }
        }

        dest = alloc_page(GFP_KERNEL);
        if (!dest) {
                while (src_idx--)
                        __free_page(xor_srcs[src_idx]);
                return -ENOMEM;
        }

        /* Fill in src buffers */
        for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
                u8 *ptr = page_address(xor_srcs[src_idx]);
                for (i = 0; i < PAGE_SIZE; i++)
                        ptr[i] = (1 << src_idx);
        }

        for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
                cmp_byte ^= (u8) (1 << src_idx);

        cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
                (cmp_byte << 8) | cmp_byte;

        memset(page_address(dest), 0, PAGE_SIZE);

        dma_chan = &mv_chan->dmachan;
        if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
                err = -ENODEV;
                goto out;
        }

        /* test xor */
        dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
                                DMA_FROM_DEVICE);

        for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
                dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
                                           0, PAGE_SIZE, DMA_TO_DEVICE);

        tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
                                 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);

        cookie = mv_xor_tx_submit(tx);
        mv_xor_issue_pending(dma_chan);
        async_tx_ack(tx);
        msleep(8);

        if (mv_xor_status(dma_chan, cookie, NULL) !=
            DMA_SUCCESS) {
                dev_err(dma_chan->device->dev,
                        "Self-test xor timed out, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

        dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
                                PAGE_SIZE, DMA_FROM_DEVICE);
        for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
                u32 *ptr = page_address(dest);
                if (ptr[i] != cmp_word) {
                        dev_err(dma_chan->device->dev,
                                "Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
                                i, ptr[i], cmp_word);
                        err = -ENODEV;
                        goto free_resources;
                }
        }

free_resources:
        mv_xor_free_chan_resources(dma_chan);
out:
        src_idx = MV_XOR_NUM_SRC_TEST;
        while (src_idx--)
                __free_page(xor_srcs[src_idx]);
        __free_page(dest);
        return err;
}

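/*
 * On the expected pattern above: source page i is filled with the
 * byte (1 << i), so XOR-ing the MV_XOR_NUM_SRC_TEST = 4 pages gives
 * cmp_byte = 0x01 ^ 0x02 ^ 0x04 ^ 0x08 = 0x0f in every byte, and the
 * destination page is compared word-wise against cmp_word = 0x0f0f0f0f.
 */
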
/* This driver does not implement any of the optional DMA operations. */
static int
mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
               unsigned long arg)
{
        return -ENOSYS;
}

static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
        struct dma_chan *chan, *_chan;
        struct device *dev = mv_chan->dmadev.dev;

        dma_async_device_unregister(&mv_chan->dmadev);

        dma_free_coherent(dev, MV_XOR_POOL_SIZE,
                          mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);

        list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
                                 device_node) {
                list_del(&chan->device_node);
        }

        free_irq(mv_chan->irq, mv_chan);

        return 0;
}

static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
                   struct platform_device *pdev,
                   int idx, dma_cap_mask_t cap_mask, int irq)
{
        int ret = 0;
        struct mv_xor_chan *mv_chan;
        struct dma_device *dma_dev;

        mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
        if (!mv_chan)
                return ERR_PTR(-ENOMEM);

        mv_chan->idx = idx;
        mv_chan->irq = irq;

        dma_dev = &mv_chan->dmadev;

        /* allocate coherent memory for hardware descriptors
         * note: writecombine gives slightly better performance, but
         * requires that we explicitly flush the writes
         */
        mv_chan->dma_desc_pool_virt =
          dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
                                 &mv_chan->dma_desc_pool, GFP_KERNEL);
        if (!mv_chan->dma_desc_pool_virt)
                return ERR_PTR(-ENOMEM);

        /* discover transaction capabilities from the platform data */
        dma_dev->cap_mask = cap_mask;

        INIT_LIST_HEAD(&dma_dev->channels);

        /* set base routines */
        dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
        dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
        dma_dev->device_tx_status = mv_xor_status;
        dma_dev->device_issue_pending = mv_xor_issue_pending;
        dma_dev->device_control = mv_xor_control;
        dma_dev->dev = &pdev->dev;

        /* set prep routines based on capability */
        if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
                dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
        if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
                dma_dev->max_xor = 8;
                dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
        }

        mv_chan->mmr_base = xordev->xor_base;
        if (!mv_chan->mmr_base) {
                ret = -ENOMEM;
                goto err_free_dma;
        }
        tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
                     mv_chan);

        /* clear errors before enabling interrupts */
        mv_xor_device_clear_err_status(mv_chan);

        ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
                          0, dev_name(&pdev->dev), mv_chan);
        if (ret)
                goto err_free_dma;

        mv_chan_unmask_interrupts(mv_chan);

        mv_set_mode(mv_chan, DMA_MEMCPY);

        spin_lock_init(&mv_chan->lock);
        INIT_LIST_HEAD(&mv_chan->chain);
        INIT_LIST_HEAD(&mv_chan->completed_slots);
        INIT_LIST_HEAD(&mv_chan->all_slots);
        mv_chan->dmachan.device = dma_dev;
        dma_cookie_init(&mv_chan->dmachan);

        list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

        if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
                ret = mv_xor_memcpy_self_test(mv_chan);
                dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
                if (ret)
                        goto err_free_irq;
        }

        if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
                ret = mv_xor_xor_self_test(mv_chan);
                dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
                if (ret)
                        goto err_free_irq;
        }

        dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s)\n",
                 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
                 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
                 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

        dma_async_device_register(dma_dev);
        return mv_chan;

err_free_irq:
        free_irq(mv_chan->irq, mv_chan);
err_free_dma:
        dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
                          mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
        return ERR_PTR(ret);
}

static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
                         const struct mbus_dram_target_info *dram)
{
        void __iomem *base = xordev->xor_base;
        u32 win_enable = 0;
        int i;

        for (i = 0; i < 8; i++) {
                writel(0, base + WINDOW_BASE(i));
                writel(0, base + WINDOW_SIZE(i));
                if (i < 4)
                        writel(0, base + WINDOW_REMAP_HIGH(i));
        }

        for (i = 0; i < dram->num_cs; i++) {
                const struct mbus_dram_window *cs = dram->cs + i;

                writel((cs->base & 0xffff0000) |
                       (cs->mbus_attr << 8) |
                       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
                writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

                win_enable |= (1 << i);
                win_enable |= 3 << (16 + (2 * i));
        }

        writel(win_enable, base + WINDOW_BAR_ENABLE(0));
        writel(win_enable, base + WINDOW_BAR_ENABLE(1));
        writel(0, base + WINDOW_OVERRIDE_CTRL(0));
        writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}

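/*
 * Each WINDOW_BASE word programmed above packs one DRAM chip-select's
 * decoding window: base address in the upper 16 bits, the mbus
 * attribute in bits 15:8 and the DRAM target id in the low bits,
 * mirroring the CPU's own address decoding windows.  win_enable sets
 * one enable bit per window in the low half of the bar-enable register
 * plus a 2-bit access-control field per window in the upper half.
 */
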
static int mv_xor_probe(struct platform_device *pdev)
{
        const struct mbus_dram_target_info *dram;
        struct mv_xor_device *xordev;
        struct mv_xor_platform_data *pdata = pdev->dev.platform_data;
        struct resource *res;
        int i, ret;

        dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

        xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
        if (!xordev)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENODEV;

        xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
                                        resource_size(res));
        if (!xordev->xor_base)
                return -EBUSY;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (!res)
                return -ENODEV;

        xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
                                             resource_size(res));
        if (!xordev->xor_high_base)
                return -EBUSY;

        platform_set_drvdata(pdev, xordev);

        /*
         * (Re-)program MBUS remapping windows if we are asked to.
         */
        dram = mv_mbus_dram_info();
        if (dram)
                mv_xor_conf_mbus_windows(xordev, dram);

        /* Not all platforms can gate the clock, so it is not
         * an error if the clock does not exist.
         */
        xordev->clk = clk_get(&pdev->dev, NULL);
        if (!IS_ERR(xordev->clk))
                clk_prepare_enable(xordev->clk);

        if (pdev->dev.of_node) {
                struct device_node *np;
                int i = 0;

                for_each_child_of_node(pdev->dev.of_node, np) {
                        dma_cap_mask_t cap_mask;
                        int irq;

                        dma_cap_zero(cap_mask);
                        if (of_property_read_bool(np, "dmacap,memcpy"))
                                dma_cap_set(DMA_MEMCPY, cap_mask);
                        if (of_property_read_bool(np, "dmacap,xor"))
                                dma_cap_set(DMA_XOR, cap_mask);
                        if (of_property_read_bool(np, "dmacap,interrupt"))
                                dma_cap_set(DMA_INTERRUPT, cap_mask);

                        irq = irq_of_parse_and_map(np, 0);
                        if (!irq) {
                                ret = -ENODEV;
                                goto err_channel_add;
                        }

                        xordev->channels[i] =
                                mv_xor_channel_add(xordev, pdev, i,
                                                   cap_mask, irq);
                        if (IS_ERR(xordev->channels[i])) {
                                ret = PTR_ERR(xordev->channels[i]);
                                xordev->channels[i] = NULL;
                                irq_dispose_mapping(irq);
                                goto err_channel_add;
                        }

                        i++;
                }
        } else if (pdata && pdata->channels) {
                for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
                        struct mv_xor_channel_data *cd;
                        int irq;

                        cd = &pdata->channels[i];
                        if (!cd) {
                                ret = -ENODEV;
                                goto err_channel_add;
                        }

                        irq = platform_get_irq(pdev, i);
                        if (irq < 0) {
                                ret = irq;
                                goto err_channel_add;
                        }

                        xordev->channels[i] =
                                mv_xor_channel_add(xordev, pdev, i,
                                                   cd->cap_mask, irq);
                        if (IS_ERR(xordev->channels[i])) {
                                ret = PTR_ERR(xordev->channels[i]);
                                goto err_channel_add;
                        }
                }
        }

        return 0;

err_channel_add:
        for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
                if (xordev->channels[i]) {
                        mv_xor_channel_remove(xordev->channels[i]);
                        if (pdev->dev.of_node)
                                irq_dispose_mapping(xordev->channels[i]->irq);
                }

        if (!IS_ERR(xordev->clk)) {
                clk_disable_unprepare(xordev->clk);
                clk_put(xordev->clk);
        }

        return ret;
}

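/*
 * Illustrative sketch (not part of the driver): a device-tree node
 * shaped the way the OF branch of mv_xor_probe() expects, with one
 * child node per channel.  Addresses, sizes and interrupt numbers are
 * placeholders; the binding document for "marvell,orion-xor" is
 * authoritative:
 *
 *      xor@d0060900 {
 *              compatible = "marvell,orion-xor";
 *              reg = <0xd0060900 0x100
 *                     0xd0060b00 0x100>;
 *
 *              xor00 {
 *                      interrupts = <51>;
 *                      dmacap,memcpy;
 *                      dmacap,xor;
 *              };
 *              xor01 {
 *                      interrupts = <52>;
 *                      dmacap,memcpy;
 *                      dmacap,xor;
 *                      dmacap,interrupt;
 *              };
 *      };
 */
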
static int mv_xor_remove(struct platform_device *pdev)
{
        struct mv_xor_device *xordev = platform_get_drvdata(pdev);
        int i;

        for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
                if (xordev->channels[i])
                        mv_xor_channel_remove(xordev->channels[i]);
        }

        if (!IS_ERR(xordev->clk)) {
                clk_disable_unprepare(xordev->clk);
                clk_put(xordev->clk);
        }

        return 0;
}

#ifdef CONFIG_OF
static struct of_device_id mv_xor_dt_ids[] = {
        { .compatible = "marvell,orion-xor", },
        {},
};
MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
#endif

static struct platform_driver mv_xor_driver = {
        .probe          = mv_xor_probe,
        .remove         = mv_xor_remove,
        .driver         = {
                .owner          = THIS_MODULE,
                .name           = MV_XOR_NAME,
                .of_match_table = of_match_ptr(mv_xor_dt_ids),
        },
};

static int __init mv_xor_init(void)
{
        return platform_driver_register(&mv_xor_driver);
}
module_init(mv_xor_init);

/* it's currently unsafe to unload this module */
#if 0
static void __exit mv_xor_exit(void)
{
        platform_driver_unregister(&mv_xor_driver);
        return;
}

module_exit(mv_xor_exit);
#endif

MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");