/*
 * Copyright (C) 2015-2016 Marvell International Ltd.
 *
 * This program is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>

#include "dmaengine.h"
/* DMA Engine Registers */
#define MV_XOR_V2_DMA_DESQ_BALR_OFF			0x000
#define MV_XOR_V2_DMA_DESQ_BAHR_OFF			0x004
#define MV_XOR_V2_DMA_DESQ_SIZE_OFF			0x008
#define MV_XOR_V2_DMA_DESQ_DONE_OFF			0x00C
#define   MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK		0x7FFF
#define   MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT		0
#define   MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_MASK		0x1FFF
#define   MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_SHIFT	16
#define MV_XOR_V2_DMA_DESQ_ARATTR_OFF			0x010
#define   MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK		0x3F3F
#define   MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE	0x202
#define   MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE		0x3C3C
#define MV_XOR_V2_DMA_IMSG_CDAT_OFF			0x014
#define MV_XOR_V2_DMA_IMSG_THRD_OFF			0x018
#define   MV_XOR_V2_DMA_IMSG_THRD_MASK			0x7FFF
#define   MV_XOR_V2_DMA_IMSG_THRD_SHIFT			0x0
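/*
 * IMSG_THRD is the interrupt-message threshold: the engine is expected
 * to raise its MSI only once the completed-descriptor count crosses
 * this value, which is what the lock-free cookie update in
 * mv_xor_v2_tasklet() relies on (see the comment there).
 */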
#define MV_XOR_V2_DMA_DESQ_AWATTR_OFF			0x01C
  /* Same flags as MV_XOR_V2_DMA_DESQ_ARATTR_OFF */
#define MV_XOR_V2_DMA_DESQ_ALLOC_OFF			0x04C
#define   MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK		0xFFFF
#define   MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT		16
#define MV_XOR_V2_DMA_IMSG_BALR_OFF			0x050
#define MV_XOR_V2_DMA_IMSG_BAHR_OFF			0x054
#define MV_XOR_V2_DMA_DESQ_CTRL_OFF			0x100
#define	  MV_XOR_V2_DMA_DESQ_CTRL_32B			1
#define   MV_XOR_V2_DMA_DESQ_CTRL_128B			7
#define MV_XOR_V2_DMA_DESQ_STOP_OFF			0x800
#define MV_XOR_V2_DMA_DESQ_DEALLOC_OFF			0x804
#define MV_XOR_V2_DMA_DESQ_ADD_OFF			0x808
/* XOR Global registers */
#define MV_XOR_V2_GLOB_BW_CTRL				0x4
#define   MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_SHIFT	0
#define   MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_VAL	64
#define   MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_SHIFT	8
#define   MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_VAL	8
#define   MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_SHIFT	12
#define   MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_VAL	4
#define   MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_SHIFT	16
#define   MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_VAL	4
#define MV_XOR_V2_GLOB_PAUSE				0x014
#define   MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL		0x8
#define MV_XOR_V2_GLOB_SYS_INT_CAUSE			0x200
#define MV_XOR_V2_GLOB_SYS_INT_MASK			0x204
#define MV_XOR_V2_GLOB_MEM_INT_CAUSE			0x220
#define MV_XOR_V2_GLOB_MEM_INT_MASK			0x224
#define MV_XOR_V2_MIN_DESC_SIZE				32
#define MV_XOR_V2_EXT_DESC_SIZE				128

#define MV_XOR_V2_DESC_RESERVED_SIZE			12
#define MV_XOR_V2_DESC_BUFF_D_ADDR_SIZE			12

#define MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF		8
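/*
 * A single command line can reference at most 8 data buffers, which is
 * why mv_xor_v2_prep_dma_xor() below rejects src_cnt above this value
 * and the DMA device advertises max_xor = 8 in mv_xor_v2_probe().
 */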
/*
 * Descriptors queue size. With 32 bytes descriptors, up to 2^14
 * descriptors are allowed, with 128 bytes descriptors, up to 2^12
 * descriptors are allowed. This driver uses 128 bytes descriptors,
 * but experimentation has shown that a set of 1024 descriptors is
 * sufficient to reach a good level of performance.
 */
#define MV_XOR_V2_DESC_NUM				1024
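/*
 * At 128 bytes per descriptor, MV_XOR_V2_DESC_NUM = 1024 descriptors
 * amount to 128 KiB of coherent memory for the DESQ, allocated once in
 * mv_xor_v2_probe().
 */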
/*
 * struct mv_xor_v2_descriptor - DMA HW descriptor
 * @desc_id: used by S/W and is not affected by H/W.
 * @flags: error and status flags
 * @crc32_result: CRC32 calculation result
 * @desc_ctrl: operation mode and control flags
 * @buff_size: amount of bytes to be processed
 * @fill_pattern_src_addr: Fill-Pattern or Source-Address and
 * AW-Attributes
 * @data_buff_addr: Source (and might be RAID6 destination)
 * addresses of data buffers in RAID5 and RAID6
 * @reserved: reserved
 */
struct mv_xor_v2_descriptor {
	u16 desc_id;
	u16 flags;
	u32 crc32_result;
	u32 desc_ctrl;

	/* Definitions for desc_ctrl */
#define DESC_NUM_ACTIVE_D_BUF_SHIFT	22
#define DESC_OP_MODE_SHIFT		28
#define DESC_OP_MODE_NOP		0	/* Idle operation */
#define DESC_OP_MODE_MEMCPY		1	/* Pure-DMA operation */
#define DESC_OP_MODE_MEMSET		2	/* Mem-Fill operation */
#define DESC_OP_MODE_MEMINIT		3	/* Mem-Init operation */
#define DESC_OP_MODE_MEM_COMPARE	4	/* Mem-Compare operation */
#define DESC_OP_MODE_CRC32		5	/* CRC32 calculation */
#define DESC_OP_MODE_XOR		6	/* RAID5 (XOR) operation */
#define DESC_OP_MODE_RAID6		7	/* RAID6 P&Q-generation */
#define DESC_OP_MODE_RAID6_REC		8	/* RAID6 Recovery */
#define DESC_Q_BUFFER_ENABLE		BIT(16)
#define DESC_P_BUFFER_ENABLE		BIT(17)
#define DESC_IOD			BIT(27)

	u32 buff_size;
	u32 fill_pattern_src_addr[4];
	u32 data_buff_addr[MV_XOR_V2_DESC_BUFF_D_ADDR_SIZE];
	u32 reserved[MV_XOR_V2_DESC_RESERVED_SIZE];
};
/*
 * struct mv_xor_v2_device - implements a xor device
 * @lock: lock for the engine
 * @dma_base: memory mapped DMA register base
 * @glob_base: memory mapped global register base
 * @clk: reference to the engine clock
 * @irq_tasklet: tasklet handling descriptor completion
 * @free_sw_desc: linked list of free SW descriptors
 * @dmadev: dma device
 * @dmachan: dma channel
 * @hw_desq: HW descriptors queue
 * @hw_desq_virt: virtual address of DESCQ
 * @sw_desq: SW descriptors queue
 * @desc_size: HW descriptor size
 * @npendings: number of pending descriptors (for which tx_submit has
 * been called, but not yet issue_pending)
 * @hw_queue_idx: index of the next free slot in the DESQ
 */
struct mv_xor_v2_device {
	spinlock_t lock;
	void __iomem *dma_base;
	void __iomem *glob_base;
	struct clk *clk;
	struct tasklet_struct irq_tasklet;
	struct list_head free_sw_desc;
	struct dma_device dmadev;
	struct dma_chan dmachan;
	dma_addr_t hw_desq;
	struct mv_xor_v2_descriptor *hw_desq_virt;
	struct mv_xor_v2_sw_desc *sw_desq;
	int desc_size;
	unsigned int npendings;
	unsigned int hw_queue_idx;
};
/*
 * struct mv_xor_v2_sw_desc - implements a xor SW descriptor
 * @idx: descriptor index
 * @async_tx: support for the async_tx api
 * @hw_desc: associated HW descriptor
 * @free_list: node of the free SW descriptors list
 */
struct mv_xor_v2_sw_desc {
	int idx;
	struct dma_async_tx_descriptor async_tx;
	struct mv_xor_v2_descriptor hw_desc;
	struct list_head free_list;
};
/*
 * Fill the data buffers to a HW descriptor
 */
static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev,
					struct mv_xor_v2_descriptor *desc,
					dma_addr_t src, int index)
{
	int arr_index = ((index >> 1) * 3);

	/*
	 * Fill the buffer's addresses to the descriptor.
	 *
	 * The format of the buffers address for 2 sequential buffers
	 * X and X + 1:
	 *
	 *  First word:  Buffer-DX-Address-Low[31:0]
	 *  Second word: Buffer-DX+1-Address-Low[31:0]
	 *  Third word:  DX+1-Buffer-Address-High[47:32] [31:16]
	 *               DX-Buffer-Address-High[47:32] [15:0]
	 */
	if ((index & 0x1) == 0) {
		desc->data_buff_addr[arr_index] = lower_32_bits(src);

		desc->data_buff_addr[arr_index + 2] &= ~0xFFFF;
		desc->data_buff_addr[arr_index + 2] |=
			upper_32_bits(src) & 0xFFFF;
	} else {
		desc->data_buff_addr[arr_index + 1] =
			lower_32_bits(src);

		desc->data_buff_addr[arr_index + 2] &= ~0xFFFF0000;
		desc->data_buff_addr[arr_index + 2] |=
			(upper_32_bits(src) & 0xFFFF) << 16;
	}
}
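/*
 * Worked example of the packing above (illustrative values only): for
 * a pair of 48-bit sources src[0] = 0x123456780 (index 0) and
 * src[1] = 0x287654320 (index 1), both map to arr_index 0 and yield:
 *
 *   data_buff_addr[0] = 0x23456780;  (D0 address, low 32 bits)
 *   data_buff_addr[1] = 0x87654320;  (D1 address, low 32 bits)
 *   data_buff_addr[2] = 0x00020001;  (D1 high 16 bits | D0 high 16 bits)
 *
 * i.e. every two buffers consume three 32-bit words, which is why
 * arr_index advances by 3 for each pair.
 */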
/*
 * notify the engine of new descriptors, and update the available index.
 */
static void mv_xor_v2_add_desc_to_desq(struct mv_xor_v2_device *xor_dev,
				       int num_of_desc)
{
	/* write the number of new descriptors in the DESQ. */
	writel(num_of_desc, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ADD_OFF);
}
/*
 * free HW descriptors
 */
static void mv_xor_v2_free_desc_from_desq(struct mv_xor_v2_device *xor_dev,
					  int num_of_desc)
{
	/* write the number of processed descriptors in the DESQ. */
	writel(num_of_desc,
	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DEALLOC_OFF);
}
/*
 * Set descriptor size
 * Return the HW descriptor size in bytes
 */
static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev)
{
	writel(MV_XOR_V2_DMA_DESQ_CTRL_128B,
	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_CTRL_OFF);

	return MV_XOR_V2_EXT_DESC_SIZE;
}
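/*
 * Only the 128-byte descriptor format is used: the first 32 bytes hold
 * the control words and fill_pattern_src_addr[], while the extended
 * part holds data_buff_addr[], which the multi-source XOR operations
 * need.
 */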
/*
 * interrupt handler
 */
static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
{
	struct mv_xor_v2_device *xor_dev = data;
	unsigned int ndescs;
	u32 reg;

	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DONE_OFF);

	ndescs = ((reg >> MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT) &
		  MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK);

	/* No descriptors to process */
	if (!ndescs)
		return IRQ_NONE;

	/* schedule a tasklet to handle descriptors callbacks */
	tasklet_schedule(&xor_dev->irq_tasklet);

	return IRQ_HANDLED;
}
/*
 * submit a descriptor to the DMA engine
 */
static dma_cookie_t
mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
{
	void *dest_hw_desc;
	dma_cookie_t cookie;
	struct mv_xor_v2_sw_desc *sw_desc =
		container_of(tx, struct mv_xor_v2_sw_desc, async_tx);
	struct mv_xor_v2_device *xor_dev =
		container_of(tx->chan, struct mv_xor_v2_device, dmachan);

	dev_dbg(xor_dev->dmadev.dev,
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	/* lock enqueue DESCQ */
	spin_lock_bh(&xor_dev->lock);
	cookie = dma_cookie_assign(tx);

	/* copy the HW descriptor from the SW descriptor to the DESQ */
	dest_hw_desc = xor_dev->hw_desq_virt + xor_dev->hw_queue_idx;

	memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size);

	xor_dev->npendings++;
	xor_dev->hw_queue_idx++;
	if (xor_dev->hw_queue_idx >= MV_XOR_V2_DESC_NUM)
		xor_dev->hw_queue_idx = 0;

	spin_unlock_bh(&xor_dev->lock);

	return cookie;
}
/*
 * Prepare a SW descriptor
 */
static struct mv_xor_v2_sw_desc *
mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
{
	struct mv_xor_v2_sw_desc *sw_desc;
	bool found = false;

	/* Lock the channel */
	spin_lock_bh(&xor_dev->lock);

	if (list_empty(&xor_dev->free_sw_desc)) {
		spin_unlock_bh(&xor_dev->lock);
		/* schedule tasklet to free some descriptors */
		tasklet_schedule(&xor_dev->irq_tasklet);
		return NULL;
	}

	list_for_each_entry(sw_desc, &xor_dev->free_sw_desc, free_list) {
		if (async_tx_test_ack(&sw_desc->async_tx)) {
			found = true;
			break;
		}
	}

	if (!found) {
		spin_unlock_bh(&xor_dev->lock);
		return NULL;
	}

	list_del(&sw_desc->free_list);

	/* Release the channel */
	spin_unlock_bh(&xor_dev->lock);

	return sw_desc;
}
/*
 * Prepare a HW descriptor for a memcpy operation
 */
static struct dma_async_tx_descriptor *
mv_xor_v2_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			  dma_addr_t src, size_t len, unsigned long flags)
{
	struct mv_xor_v2_sw_desc *sw_desc;
	struct mv_xor_v2_descriptor *hw_descriptor;
	struct mv_xor_v2_device *xor_dev;

	xor_dev = container_of(chan, struct mv_xor_v2_device, dmachan);

	dev_dbg(xor_dev->dmadev.dev,
		"%s len: %zu src %pad dest %pad flags: %ld\n",
		__func__, len, &src, &dest, flags);

	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
	if (!sw_desc)
		return NULL;

	sw_desc->async_tx.flags = flags;

	/* set the HW descriptor */
	hw_descriptor = &sw_desc->hw_desc;

	/* save the SW descriptor ID to restore when operation is done */
	hw_descriptor->desc_id = sw_desc->idx;

	/* Set the MEMCPY control word */
	hw_descriptor->desc_ctrl =
		DESC_OP_MODE_MEMCPY << DESC_OP_MODE_SHIFT;

	if (flags & DMA_PREP_INTERRUPT)
		hw_descriptor->desc_ctrl |= DESC_IOD;

	/* Set source address */
	hw_descriptor->fill_pattern_src_addr[0] = lower_32_bits(src);
	hw_descriptor->fill_pattern_src_addr[1] =
		upper_32_bits(src) & 0xFFFF;

	/* Set Destination address */
	hw_descriptor->fill_pattern_src_addr[2] = lower_32_bits(dest);
	hw_descriptor->fill_pattern_src_addr[3] =
		upper_32_bits(dest) & 0xFFFF;

	/* Set buffers size */
	hw_descriptor->buff_size = len;

	/* return the async tx descriptor */
	return &sw_desc->async_tx;
}
/*
 * Prepare a HW descriptor for a XOR operation
 */
static struct dma_async_tx_descriptor *
mv_xor_v2_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		       unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_v2_sw_desc *sw_desc;
	struct mv_xor_v2_descriptor *hw_descriptor;
	struct mv_xor_v2_device *xor_dev =
		container_of(chan, struct mv_xor_v2_device, dmachan);
	int i;

	if (src_cnt > MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF || src_cnt < 1)
		return NULL;

	dev_dbg(xor_dev->dmadev.dev,
		"%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
		__func__, src_cnt, len, &dest, flags);

	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
	if (!sw_desc)
		return NULL;

	sw_desc->async_tx.flags = flags;

	/* set the HW descriptor */
	hw_descriptor = &sw_desc->hw_desc;

	/* save the SW descriptor ID to restore when operation is done */
	hw_descriptor->desc_id = sw_desc->idx;

	/* Set the XOR control word */
	hw_descriptor->desc_ctrl =
		DESC_OP_MODE_XOR << DESC_OP_MODE_SHIFT;
	hw_descriptor->desc_ctrl |= DESC_P_BUFFER_ENABLE;

	if (flags & DMA_PREP_INTERRUPT)
		hw_descriptor->desc_ctrl |= DESC_IOD;

	/* Set the data buffers */
	for (i = 0; i < src_cnt; i++)
		mv_xor_v2_set_data_buffers(xor_dev, hw_descriptor, src[i], i);

	hw_descriptor->desc_ctrl |=
		src_cnt << DESC_NUM_ACTIVE_D_BUF_SHIFT;

	/* Set Destination address */
	hw_descriptor->fill_pattern_src_addr[2] = lower_32_bits(dest);
	hw_descriptor->fill_pattern_src_addr[3] =
		upper_32_bits(dest) & 0xFFFF;

	/* Set buffers size */
	hw_descriptor->buff_size = len;

	/* return the async tx descriptor */
	return &sw_desc->async_tx;
}
/*
 * Prepare a HW descriptor for an interrupt operation.
 */
static struct dma_async_tx_descriptor *
mv_xor_v2_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct mv_xor_v2_sw_desc *sw_desc;
	struct mv_xor_v2_descriptor *hw_descriptor;
	struct mv_xor_v2_device *xor_dev =
		container_of(chan, struct mv_xor_v2_device, dmachan);

	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
	if (!sw_desc)
		return NULL;

	/* set the HW descriptor */
	hw_descriptor = &sw_desc->hw_desc;

	/* save the SW descriptor ID to restore when operation is done */
	hw_descriptor->desc_id = sw_desc->idx;

	/* Set the INTERRUPT control word */
	hw_descriptor->desc_ctrl =
		DESC_OP_MODE_NOP << DESC_OP_MODE_SHIFT;
	hw_descriptor->desc_ctrl |= DESC_IOD;

	/* return the async tx descriptor */
	return &sw_desc->async_tx;
}
/*
 * push pending transactions to hardware
 */
static void mv_xor_v2_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_v2_device *xor_dev =
		container_of(chan, struct mv_xor_v2_device, dmachan);

	spin_lock_bh(&xor_dev->lock);

	/*
	 * update the engine with the number of descriptors to
	 * process
	 */
	mv_xor_v2_add_desc_to_desq(xor_dev, xor_dev->npendings);
	xor_dev->npendings = 0;

	/* Activate the channel */
	writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);

	spin_unlock_bh(&xor_dev->lock);
}
static inline
int mv_xor_v2_get_pending_params(struct mv_xor_v2_device *xor_dev,
				 int *pending_ptr)
{
	u32 reg;

	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DONE_OFF);

	/* get the next pending descriptor index */
	*pending_ptr = ((reg >> MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_SHIFT) &
			MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_MASK);

	/* get the number of descriptors pending handle */
	return ((reg >> MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT) &
		MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK);
}
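/*
 * DESQ_DONE layout, per the mask/shift definitions at the top of the
 * file: bits [14:0] hold the number of completed-but-unprocessed
 * descriptors and bits [28:16] the index of the next one to process.
 */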
/*
 * handle the descriptors after HW process
 */
static void mv_xor_v2_tasklet(unsigned long data)
{
	struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data;
	int pending_ptr, num_of_pending, i;
	struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL;

	dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__);

	/* get the pending descriptors parameters */
	num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr);

	/* loop over free descriptors */
	for (i = 0; i < num_of_pending; i++) {
		struct mv_xor_v2_descriptor *next_pending_hw_desc =
			xor_dev->hw_desq_virt + pending_ptr;

		/* get the SW descriptor related to the HW descriptor */
		next_pending_sw_desc =
			&xor_dev->sw_desq[next_pending_hw_desc->desc_id];

		/* call the callback */
		if (next_pending_sw_desc->async_tx.cookie > 0) {
			/*
			 * update the channel's completed cookie - no
			 * lock is required: the IMSG threshold provides
			 * the protection
			 */
			dma_cookie_complete(&next_pending_sw_desc->async_tx);

			if (next_pending_sw_desc->async_tx.callback)
				next_pending_sw_desc->async_tx.callback(
				next_pending_sw_desc->async_tx.callback_param);

			dma_descriptor_unmap(&next_pending_sw_desc->async_tx);
		}

		dma_run_dependencies(&next_pending_sw_desc->async_tx);

		/* Lock the channel */
		spin_lock_bh(&xor_dev->lock);

		/* add the SW descriptor to the free descriptors list */
		list_add(&next_pending_sw_desc->free_list,
			 &xor_dev->free_sw_desc);

		/* Release the channel */
		spin_unlock_bh(&xor_dev->lock);

		/* increment the next descriptor */
		pending_ptr++;
		if (pending_ptr >= MV_XOR_V2_DESC_NUM)
			pending_ptr = 0;
	}

	if (num_of_pending != 0) {
		/* free the descriptors */
		mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending);
	}
}
/*
 * Set DMA Interrupt-message (IMSG) parameters
 */
static void mv_xor_v2_set_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	struct mv_xor_v2_device *xor_dev = dev_get_drvdata(desc->dev);

	writel(msg->address_lo,
	       xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_BALR_OFF);
	writel(msg->address_hi & 0xFFFF,
	       xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_BAHR_OFF);
	writel(msg->data,
	       xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_CDAT_OFF);
}
static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
{
	u32 reg;

	/* write the DESQ size to the DMA engine */
	writel(MV_XOR_V2_DESC_NUM,
	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_SIZE_OFF);

	/* write the DESQ address to the DMA engine */
	writel(xor_dev->hw_desq & 0xFFFFFFFF,
	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BALR_OFF);
	writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32,
	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);

	/*
	 * This is a temporary solution, until we activate the
	 * SMMU. Set the attributes for reading & writing data buffers
	 * & descriptors to:
	 *
	 *  - OuterShareable - Snoops will be performed on CPU caches
	 *  - Enable cacheable - Bufferable, Modifiable, Other Allocate
	 *    and Allocate
	 */
	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ARATTR_OFF);
	reg &= ~MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK;
	reg |= MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE |
		MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE;
	writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ARATTR_OFF);

	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_AWATTR_OFF);
	reg &= ~MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK;
	reg |= MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE |
		MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE;
	writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_AWATTR_OFF);

	/*
	 * BW CTRL - set values to optimize the XOR performance:
	 *
	 *  - Set WrBurstLen & RdBurstLen - the unit will issue
	 *    maximum of 256B write/read transactions.
	 *  - Limit the number of outstanding write & read data
	 *    (OBB/IBB) requests to the maximal value.
	 */
	reg = ((MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_VAL <<
		MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_SHIFT) |
	       (MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_VAL <<
		MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_SHIFT) |
	       (MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_VAL <<
		MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_SHIFT) |
	       (MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_VAL <<
		MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_SHIFT));
	writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_BW_CTRL);

	/* Disable the AXI timer feature */
	reg = readl(xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
	reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL;
	writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);

	/* enable the DMA engine */
	writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);

	return 0;
}
static int mv_xor_v2_probe(struct platform_device *pdev)
{
	struct mv_xor_v2_device *xor_dev;
	struct resource *res;
	int i, ret = 0;
	struct dma_device *dma_dev;
	struct mv_xor_v2_sw_desc *sw_desc;
	struct msi_desc *msi_desc;

	BUILD_BUG_ON(sizeof(struct mv_xor_v2_descriptor) !=
		     MV_XOR_V2_EXT_DESC_SIZE);

	xor_dev = devm_kzalloc(&pdev->dev, sizeof(*xor_dev), GFP_KERNEL);
	if (!xor_dev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xor_dev->dma_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(xor_dev->dma_base))
		return PTR_ERR(xor_dev->dma_base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	xor_dev->glob_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(xor_dev->glob_base))
		return PTR_ERR(xor_dev->glob_base);

	platform_set_drvdata(pdev, xor_dev);

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
	if (ret)
		return ret;

	xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	if (!IS_ERR(xor_dev->clk)) {
		ret = clk_prepare_enable(xor_dev->clk);
		if (ret)
			return ret;
	}

	ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1,
					     mv_xor_v2_set_msi_msg);
	if (ret)
		goto disable_clk;

	msi_desc = first_msi_entry(&pdev->dev);
	if (!msi_desc)
		goto free_msi_irqs;

	ret = devm_request_irq(&pdev->dev, msi_desc->irq,
			       mv_xor_v2_interrupt_handler, 0,
			       dev_name(&pdev->dev), xor_dev);
	if (ret)
		goto free_msi_irqs;

	tasklet_init(&xor_dev->irq_tasklet, mv_xor_v2_tasklet,
		     (unsigned long) xor_dev);

	xor_dev->desc_size = mv_xor_v2_set_desc_size(xor_dev);

	dma_cookie_init(&xor_dev->dmachan);

	/*
	 * allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	xor_dev->hw_desq_virt =
		dma_alloc_coherent(&pdev->dev,
				   xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
				   &xor_dev->hw_desq, GFP_KERNEL);
	if (!xor_dev->hw_desq_virt) {
		ret = -ENOMEM;
		goto free_msi_irqs;
	}

	/* alloc memory for the SW descriptors */
	xor_dev->sw_desq = devm_kzalloc(&pdev->dev, sizeof(*sw_desc) *
					MV_XOR_V2_DESC_NUM, GFP_KERNEL);
	if (!xor_dev->sw_desq) {
		ret = -ENOMEM;
		goto free_hw_desq;
	}

	spin_lock_init(&xor_dev->lock);

	/* init the free SW descriptors list */
	INIT_LIST_HEAD(&xor_dev->free_sw_desc);

	/* add all SW descriptors to the free list */
	for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) {
		struct mv_xor_v2_sw_desc *sw_desc =
			xor_dev->sw_desq + i;
		sw_desc->idx = i;
		dma_async_tx_descriptor_init(&sw_desc->async_tx,
					     &xor_dev->dmachan);
		sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
		async_tx_ack(&sw_desc->async_tx);

		list_add(&sw_desc->free_list,
			 &xor_dev->free_sw_desc);
	}

	dma_dev = &xor_dev->dmadev;

	/* set DMA capabilities */
	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_XOR, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

	/* init dma link list */
	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = mv_xor_v2_issue_pending;
	dma_dev->dev = &pdev->dev;

	dma_dev->device_prep_dma_memcpy = mv_xor_v2_prep_dma_memcpy;
	dma_dev->device_prep_dma_interrupt = mv_xor_v2_prep_dma_interrupt;
	dma_dev->max_xor = 8;
	dma_dev->device_prep_dma_xor = mv_xor_v2_prep_dma_xor;

	xor_dev->dmachan.device = dma_dev;

	list_add_tail(&xor_dev->dmachan.device_node,
		      &dma_dev->channels);

	mv_xor_v2_descq_init(xor_dev);

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto free_hw_desq;

	dev_notice(&pdev->dev, "Marvell Version 2 XOR driver\n");

	return 0;

free_hw_desq:
	dma_free_coherent(&pdev->dev,
			  xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
			  xor_dev->hw_desq_virt, xor_dev->hw_desq);
free_msi_irqs:
	platform_msi_domain_free_irqs(&pdev->dev);
disable_clk:
	if (!IS_ERR(xor_dev->clk))
		clk_disable_unprepare(xor_dev->clk);
	return ret;
}
static int mv_xor_v2_remove(struct platform_device *pdev)
{
	struct mv_xor_v2_device *xor_dev = platform_get_drvdata(pdev);

	dma_async_device_unregister(&xor_dev->dmadev);

	dma_free_coherent(&pdev->dev,
			  xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
			  xor_dev->hw_desq_virt, xor_dev->hw_desq);

	platform_msi_domain_free_irqs(&pdev->dev);

	clk_disable_unprepare(xor_dev->clk);

	return 0;
}
#ifdef CONFIG_OF
static const struct of_device_id mv_xor_v2_dt_ids[] = {
	{ .compatible = "marvell,xor-v2", },
	{},
};
MODULE_DEVICE_TABLE(of, mv_xor_v2_dt_ids);
#endif
static struct platform_driver mv_xor_v2_driver = {
	.probe		= mv_xor_v2_probe,
	.remove		= mv_xor_v2_remove,
	.driver		= {
		.name	= "mv_xor_v2",
		.of_match_table = of_match_ptr(mv_xor_v2_dt_ids),
	},
};

module_platform_driver(mv_xor_v2_driver);

MODULE_DESCRIPTION("DMA engine driver for Marvell's Version 2 of XOR engine");
MODULE_LICENSE("GPL");