/*
 * Copyright (C) 2015-2016 Marvell International Ltd.
 *
 * This program is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>

#include "dmaengine.h"

/* DMA Engine Registers */
#define MV_XOR_V2_DMA_DESQ_BALR_OFF			0x000
#define MV_XOR_V2_DMA_DESQ_BAHR_OFF			0x004
#define MV_XOR_V2_DMA_DESQ_SIZE_OFF			0x008
#define MV_XOR_V2_DMA_DESQ_DONE_OFF			0x00C
#define MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK		0x7FFF
#define MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT		0
#define MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_MASK		0x1FFF
#define MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_SHIFT		16
#define MV_XOR_V2_DMA_DESQ_ARATTR_OFF			0x010
#define MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK		0x3F3F
#define MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE		0x202
#define MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE		0x3C3C
#define MV_XOR_V2_DMA_IMSG_CDAT_OFF			0x014
#define MV_XOR_V2_DMA_IMSG_THRD_OFF			0x018
#define MV_XOR_V2_DMA_IMSG_THRD_MASK			0x7FFF
#define MV_XOR_V2_DMA_IMSG_THRD_SHIFT			0x0
#define MV_XOR_V2_DMA_IMSG_TIMER_EN			BIT(18)
#define MV_XOR_V2_DMA_DESQ_AWATTR_OFF			0x01C
  /* Same flags as MV_XOR_V2_DMA_DESQ_ARATTR_OFF */
#define MV_XOR_V2_DMA_DESQ_ALLOC_OFF			0x04C
#define MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK		0xFFFF
#define MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT		16
#define MV_XOR_V2_DMA_IMSG_BALR_OFF			0x050
#define MV_XOR_V2_DMA_IMSG_BAHR_OFF			0x054
#define MV_XOR_V2_DMA_DESQ_CTRL_OFF			0x100
#define MV_XOR_V2_DMA_DESQ_CTRL_32B			1
#define MV_XOR_V2_DMA_DESQ_CTRL_128B			7
#define MV_XOR_V2_DMA_DESQ_STOP_OFF			0x800
#define MV_XOR_V2_DMA_DESQ_DEALLOC_OFF			0x804
#define MV_XOR_V2_DMA_DESQ_ADD_OFF			0x808
#define MV_XOR_V2_DMA_IMSG_TMOT				0x810
#define MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK		0x1FFF
#define MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT		0

/* XOR Global registers */
#define MV_XOR_V2_GLOB_BW_CTRL				0x4
#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_SHIFT	0
#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_VAL		64
#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_SHIFT	8
#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_VAL		8
#define MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_SHIFT	12
#define MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_VAL		4
#define MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_SHIFT	16
#define MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_VAL		4
#define MV_XOR_V2_GLOB_PAUSE				0x014
#define MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL		0x8
#define MV_XOR_V2_GLOB_SYS_INT_CAUSE			0x200
#define MV_XOR_V2_GLOB_SYS_INT_MASK			0x204
#define MV_XOR_V2_GLOB_MEM_INT_CAUSE			0x220
#define MV_XOR_V2_GLOB_MEM_INT_MASK			0x224

#define MV_XOR_V2_MIN_DESC_SIZE				32
#define MV_XOR_V2_EXT_DESC_SIZE				128

#define MV_XOR_V2_DESC_RESERVED_SIZE			12
#define MV_XOR_V2_DESC_BUFF_D_ADDR_SIZE			12

#define MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF		8

/*
 * Descriptors queue size. With 32 byte descriptors, up to 2^14
 * descriptors are allowed, with 128 byte descriptors, up to 2^12
 * descriptors are allowed. This driver uses 128 byte descriptors,
 * but experimentation has shown that a set of 1024 descriptors is
 * sufficient to reach a good level of performance.
 */
#define MV_XOR_V2_DESC_NUM				1024
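/* At 128 bytes per descriptor, this queue occupies 128 KiB of coherent memory. */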

/*
 * Threshold values for descriptors and timeout, determined by
 * experimentation as giving a good level of performance.
 */
#define MV_XOR_V2_DONE_IMSG_THRD	0x14
#define MV_XOR_V2_TIMER_THRD		0xB0

/**
 * struct mv_xor_v2_descriptor - DMA HW descriptor
 * @desc_id: used by S/W and is not affected by H/W.
 * @flags: error and status flags
 * @crc32_result: CRC32 calculation result
 * @desc_ctrl: operation mode and control flags
 * @buff_size: amount of bytes to be processed
 * @fill_pattern_src_addr: Fill-Pattern or Source-Address and
 * AW-Attributes
 * @data_buff_addr: Source (and might be RAID6 destination)
 * addresses of data buffers in RAID5 and RAID6
 * @reserved: reserved
 */
struct mv_xor_v2_descriptor {
	u16 desc_id;
	u16 flags;
	u32 crc32_result;
	u32 desc_ctrl;

	/* Definitions for desc_ctrl */
#define DESC_NUM_ACTIVE_D_BUF_SHIFT	22
#define DESC_OP_MODE_SHIFT		28
#define DESC_OP_MODE_NOP		0	/* Idle operation */
#define DESC_OP_MODE_MEMCPY		1	/* Pure-DMA operation */
#define DESC_OP_MODE_MEMSET		2	/* Mem-Fill operation */
#define DESC_OP_MODE_MEMINIT		3	/* Mem-Init operation */
#define DESC_OP_MODE_MEM_COMPARE	4	/* Mem-Compare operation */
#define DESC_OP_MODE_CRC32		5	/* CRC32 calculation */
#define DESC_OP_MODE_XOR		6	/* RAID5 (XOR) operation */
#define DESC_OP_MODE_RAID6		7	/* RAID6 P&Q-generation */
#define DESC_OP_MODE_RAID6_REC		8	/* RAID6 Recovery */
#define DESC_Q_BUFFER_ENABLE		BIT(16)
#define DESC_P_BUFFER_ENABLE		BIT(17)
#define DESC_IOD			BIT(27)

	u32 buff_size;
	u32 fill_pattern_src_addr[4];
	u32 data_buff_addr[MV_XOR_V2_DESC_BUFF_D_ADDR_SIZE];
	u32 reserved[MV_XOR_V2_DESC_RESERVED_SIZE];
};

/**
 * struct mv_xor_v2_device - implements a xor device
 * @lock: lock for the engine
 * @dma_base: memory mapped DMA register base
 * @glob_base: memory mapped global register base
 * @clk: reference to the 'core' clock
 * @reg_clk: reference to the 'reg' clock
 * @irq_tasklet: tasklet that runs the descriptor completion callbacks
 * @free_sw_desc: linked list of free SW descriptors
 * @dmadev: dma device
 * @dmachan: dma channel
 * @hw_desq: HW descriptors queue
 * @hw_desq_virt: virtual address of DESCQ
 * @sw_desq: SW descriptors queue
 * @desc_size: HW descriptor size
 * @npendings: number of pending descriptors (for which tx_submit has
 * been called, but not yet issue_pending)
 * @hw_queue_idx: index of the next free slot in the HW descriptors queue
 */
struct mv_xor_v2_device {
	spinlock_t lock;
	void __iomem *dma_base;
	void __iomem *glob_base;
	struct clk *clk;
	struct clk *reg_clk;
	struct tasklet_struct irq_tasklet;
	struct list_head free_sw_desc;
	struct dma_device dmadev;
	struct dma_chan dmachan;
	dma_addr_t hw_desq;
	struct mv_xor_v2_descriptor *hw_desq_virt;
	struct mv_xor_v2_sw_desc *sw_desq;
	int desc_size;
	unsigned int npendings;
	unsigned int hw_queue_idx;
};

/**
 * struct mv_xor_v2_sw_desc - implements a xor SW descriptor
 * @idx: descriptor index
 * @async_tx: support for the async_tx api
 * @hw_desc: associated HW descriptor
 * @free_list: node of the free SW descriptors list
 */
struct mv_xor_v2_sw_desc {
	int idx;
	struct dma_async_tx_descriptor async_tx;
	struct mv_xor_v2_descriptor hw_desc;
	struct list_head free_list;
};

/*
 * Fill the data buffers into a HW descriptor
 */
static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev,
					struct mv_xor_v2_descriptor *desc,
					dma_addr_t src, int index)
{
	int arr_index = ((index >> 1) * 3);

	/*
	 * Fill the buffer's addresses into the descriptor.
	 *
	 * The format of the buffers address for 2 sequential buffers
	 * X and X + 1:
	 *
	 *  First word:  Buffer-DX-Address-Low[31:0]
	 *  Second word: Buffer-DX+1-Address-Low[31:0]
	 *  Third word:  DX+1-Buffer-Address-High[47:32] [31:16]
	 *               DX-Buffer-Address-High[47:32] [15:0]
	 */
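	/*
	 * For example, buffers D2 and D3 (index 2 and 3) share the
	 * triplet starting at arr_index = 3: word 3 holds D2's low 32
	 * bits, word 4 holds D3's low 32 bits, and word 5 packs D2's
	 * high 16 bits in [15:0] and D3's high 16 bits in [31:16].
	 */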
	if ((index & 0x1) == 0) {
		desc->data_buff_addr[arr_index] = lower_32_bits(src);

		desc->data_buff_addr[arr_index + 2] &= ~0xFFFF;
		desc->data_buff_addr[arr_index + 2] |=
			upper_32_bits(src) & 0xFFFF;
	} else {
		desc->data_buff_addr[arr_index + 1] =
			lower_32_bits(src);

		desc->data_buff_addr[arr_index + 2] &= ~0xFFFF0000;
		desc->data_buff_addr[arr_index + 2] |=
			(upper_32_bits(src) & 0xFFFF) << 16;
	}
}

/*
 * notify the engine of new descriptors, and update the available index.
 */
static void mv_xor_v2_add_desc_to_desq(struct mv_xor_v2_device *xor_dev,
				       int num_of_desc)
{
	/* write the number of new descriptors in the DESQ. */
	writel(num_of_desc, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ADD_OFF);
}

/*
 * free HW descriptors
 */
static void mv_xor_v2_free_desc_from_desq(struct mv_xor_v2_device *xor_dev,
					  int num_of_desc)
{
	/* write the number of completed descriptors to release them from the DESQ. */
	writel(num_of_desc, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DEALLOC_OFF);
}

/*
 * Set descriptor size
 * Return the HW descriptor size in bytes
 */
static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev)
{
	writel(MV_XOR_V2_DMA_DESQ_CTRL_128B,
	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_CTRL_OFF);

	return MV_XOR_V2_EXT_DESC_SIZE;
}

/*
 * Set the IMSG threshold
 */
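/*
 * With the timer enabled below, the engine raises a completion
 * interrupt either once MV_XOR_V2_DONE_IMSG_THRD descriptors have
 * completed or once the MV_XOR_V2_TIMER_THRD timeout expires,
 * which coalesces completion interrupts under load.
 */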
static inline
void mv_xor_v2_enable_imsg_thrd(struct mv_xor_v2_device *xor_dev)
{
	u32 reg;

	/* Configure threshold of number of descriptors, and enable timer */
	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
	reg &= ~(MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
	reg |= (MV_XOR_V2_DONE_IMSG_THRD << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
	reg |= MV_XOR_V2_DMA_IMSG_TIMER_EN;
	writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);

	/* Configure Timer Threshold */
	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT);
	reg &= ~(MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK <<
		 MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT);
	reg |= (MV_XOR_V2_TIMER_THRD << MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT);
	writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT);
}

static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
{
	struct mv_xor_v2_device *xor_dev = data;
	unsigned int ndescs;
	u32 reg;

	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DONE_OFF);

	ndescs = ((reg >> MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT) &
		  MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK);

	/* No descriptors to process */
	if (!ndescs)
		return IRQ_NONE;

	/* schedule a tasklet to handle descriptors callbacks */
	tasklet_schedule(&xor_dev->irq_tasklet);

	return IRQ_HANDLED;
}

/*
 * submit a descriptor to the DMA engine
 */
static dma_cookie_t
mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
{
	void *dest_hw_desc;
	dma_cookie_t cookie;
	struct mv_xor_v2_sw_desc *sw_desc =
		container_of(tx, struct mv_xor_v2_sw_desc, async_tx);
	struct mv_xor_v2_device *xor_dev =
		container_of(tx->chan, struct mv_xor_v2_device, dmachan);

	dev_dbg(xor_dev->dmadev.dev,
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	/* assign cookie */
	spin_lock_bh(&xor_dev->lock);
	cookie = dma_cookie_assign(tx);

	/* copy the HW descriptor from the SW descriptor to the DESQ */
	dest_hw_desc = xor_dev->hw_desq_virt + xor_dev->hw_queue_idx;

	memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size);

	xor_dev->npendings++;
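	/*
	 * The DESQ is used as a circular buffer of MV_XOR_V2_DESC_NUM
	 * slots: advance the write index and wrap it back to 0 when it
	 * reaches the end of the queue.
	 */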
	xor_dev->hw_queue_idx++;
	if (xor_dev->hw_queue_idx >= MV_XOR_V2_DESC_NUM)
		xor_dev->hw_queue_idx = 0;

	spin_unlock_bh(&xor_dev->lock);

	return cookie;
}

/*
 * Prepare a SW descriptor
 */
static struct mv_xor_v2_sw_desc *
mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
{
	struct mv_xor_v2_sw_desc *sw_desc;
	bool found = false;

	/* Lock the channel */
	spin_lock_bh(&xor_dev->lock);

	if (list_empty(&xor_dev->free_sw_desc)) {
		spin_unlock_bh(&xor_dev->lock);
		/* schedule tasklet to free some descriptors */
		tasklet_schedule(&xor_dev->irq_tasklet);
		return NULL;
	}

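	/*
	 * Descriptors on the free list may still be awaiting an ack
	 * from the async_tx client; only reuse a descriptor once it
	 * has been acked.
	 */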
	list_for_each_entry(sw_desc, &xor_dev->free_sw_desc, free_list) {
		if (async_tx_test_ack(&sw_desc->async_tx)) {
			found = true;
			break;
		}
	}

	if (!found) {
		spin_unlock_bh(&xor_dev->lock);
		return NULL;
	}

	list_del(&sw_desc->free_list);

	/* Release the channel */
	spin_unlock_bh(&xor_dev->lock);

	return sw_desc;
}

/*
 * Prepare a HW descriptor for a memcpy operation
 */
static struct dma_async_tx_descriptor *
mv_xor_v2_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			  dma_addr_t src, size_t len, unsigned long flags)
{
	struct mv_xor_v2_sw_desc *sw_desc;
	struct mv_xor_v2_descriptor *hw_descriptor;
	struct mv_xor_v2_device *xor_dev;

	xor_dev = container_of(chan, struct mv_xor_v2_device, dmachan);

	dev_dbg(xor_dev->dmadev.dev,
		"%s len: %zu src %pad dest %pad flags: %ld\n",
		__func__, len, &src, &dest, flags);

	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
	if (!sw_desc)
		return NULL;

	sw_desc->async_tx.flags = flags;

	/* set the HW descriptor */
	hw_descriptor = &sw_desc->hw_desc;

	/* save the SW descriptor ID to restore when operation is done */
	hw_descriptor->desc_id = sw_desc->idx;

	/* Set the MEMCPY control word */
	hw_descriptor->desc_ctrl =
		DESC_OP_MODE_MEMCPY << DESC_OP_MODE_SHIFT;

	if (flags & DMA_PREP_INTERRUPT)
		hw_descriptor->desc_ctrl |= DESC_IOD;

	/* Set source address */
	hw_descriptor->fill_pattern_src_addr[0] = lower_32_bits(src);
	hw_descriptor->fill_pattern_src_addr[1] =
		upper_32_bits(src) & 0xFFFF;

	/* Set Destination address */
	hw_descriptor->fill_pattern_src_addr[2] = lower_32_bits(dest);
	hw_descriptor->fill_pattern_src_addr[3] =
		upper_32_bits(dest) & 0xFFFF;

	/* Set buffers size */
	hw_descriptor->buff_size = len;

	/* return the async tx descriptor */
	return &sw_desc->async_tx;
}

/*
 * Prepare a HW descriptor for a XOR operation
 */
static struct dma_async_tx_descriptor *
mv_xor_v2_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		       unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_v2_sw_desc *sw_desc;
	struct mv_xor_v2_descriptor *hw_descriptor;
	struct mv_xor_v2_device *xor_dev =
		container_of(chan, struct mv_xor_v2_device, dmachan);
	int i;

	if (src_cnt > MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF || src_cnt < 1)
		return NULL;

	dev_dbg(xor_dev->dmadev.dev,
		"%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
		__func__, src_cnt, len, &dest, flags);

	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
	if (!sw_desc)
		return NULL;

	sw_desc->async_tx.flags = flags;

	/* set the HW descriptor */
	hw_descriptor = &sw_desc->hw_desc;

	/* save the SW descriptor ID to restore when operation is done */
	hw_descriptor->desc_id = sw_desc->idx;

	/* Set the XOR control word */
	hw_descriptor->desc_ctrl =
		DESC_OP_MODE_XOR << DESC_OP_MODE_SHIFT;
	hw_descriptor->desc_ctrl |= DESC_P_BUFFER_ENABLE;

	if (flags & DMA_PREP_INTERRUPT)
		hw_descriptor->desc_ctrl |= DESC_IOD;

	/* Set the data buffers */
	for (i = 0; i < src_cnt; i++)
		mv_xor_v2_set_data_buffers(xor_dev, hw_descriptor, src[i], i);

	hw_descriptor->desc_ctrl |=
		src_cnt << DESC_NUM_ACTIVE_D_BUF_SHIFT;

	/* Set Destination address */
	hw_descriptor->fill_pattern_src_addr[2] = lower_32_bits(dest);
	hw_descriptor->fill_pattern_src_addr[3] =
		upper_32_bits(dest) & 0xFFFF;

	/* Set buffers size */
	hw_descriptor->buff_size = len;

	/* return the async tx descriptor */
	return &sw_desc->async_tx;
}

/*
 * Prepare a HW descriptor for an interrupt operation.
 */
static struct dma_async_tx_descriptor *
mv_xor_v2_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct mv_xor_v2_sw_desc *sw_desc;
	struct mv_xor_v2_descriptor *hw_descriptor;
	struct mv_xor_v2_device *xor_dev =
		container_of(chan, struct mv_xor_v2_device, dmachan);

	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
	if (!sw_desc)
		return NULL;

	/* set the HW descriptor */
	hw_descriptor = &sw_desc->hw_desc;

	/* save the SW descriptor ID to restore when operation is done */
	hw_descriptor->desc_id = sw_desc->idx;

	/* Set the INTERRUPT control word */
	hw_descriptor->desc_ctrl =
		DESC_OP_MODE_NOP << DESC_OP_MODE_SHIFT;
	hw_descriptor->desc_ctrl |= DESC_IOD;

	/* return the async tx descriptor */
	return &sw_desc->async_tx;
}

/*
 * push pending transactions to hardware
 */
static void mv_xor_v2_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_v2_device *xor_dev =
		container_of(chan, struct mv_xor_v2_device, dmachan);

	spin_lock_bh(&xor_dev->lock);

	/*
	 * update the engine with the number of descriptors to
	 * process
	 */
	mv_xor_v2_add_desc_to_desq(xor_dev, xor_dev->npendings);
	xor_dev->npendings = 0;

	spin_unlock_bh(&xor_dev->lock);
}

static inline
int mv_xor_v2_get_pending_params(struct mv_xor_v2_device *xor_dev,
				 int *pending_ptr)
{
	u32 reg;

	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DONE_OFF);

	/* get the next pending descriptor index */
	*pending_ptr = ((reg >> MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_SHIFT) &
			MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_MASK);

	/* get the number of descriptors pending handling */
	return ((reg >> MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT) &
		MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK);
}

/*
 * handle the descriptors after HW process
 */
static void mv_xor_v2_tasklet(unsigned long data)
{
	struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data;
	int pending_ptr, num_of_pending, i;
	struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL;

	dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__);

	/* get the pending descriptors parameters */
	num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr);

	/* loop over free descriptors */
	for (i = 0; i < num_of_pending; i++) {
		struct mv_xor_v2_descriptor *next_pending_hw_desc =
			xor_dev->hw_desq_virt + pending_ptr;

		/* get the SW descriptor related to the HW descriptor */
		next_pending_sw_desc =
			&xor_dev->sw_desq[next_pending_hw_desc->desc_id];

		/* call the callback */
		if (next_pending_sw_desc->async_tx.cookie > 0) {
			/*
			 * update the channel's completed cookie - no
			 * lock is required here, the IMSG threshold
			 * provides the serialization
			 */
			dma_cookie_complete(&next_pending_sw_desc->async_tx);

			if (next_pending_sw_desc->async_tx.callback)
				next_pending_sw_desc->async_tx.callback(
				next_pending_sw_desc->async_tx.callback_param);

			dma_descriptor_unmap(&next_pending_sw_desc->async_tx);
		}

		dma_run_dependencies(&next_pending_sw_desc->async_tx);

		/* Lock the channel */
		spin_lock_bh(&xor_dev->lock);

		/* add the SW descriptor to the free descriptors list */
		list_add(&next_pending_sw_desc->free_list,
			 &xor_dev->free_sw_desc);

		/* Release the channel */
		spin_unlock_bh(&xor_dev->lock);

		/* increment the next descriptor */
		pending_ptr++;
		if (pending_ptr >= MV_XOR_V2_DESC_NUM)
			pending_ptr = 0;
	}

	if (num_of_pending != 0) {
		/* free the descriptors */
		mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending);
	}
}

/*
 * Set DMA Interrupt-message (IMSG) parameters
 */
static void mv_xor_v2_set_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	struct mv_xor_v2_device *xor_dev = dev_get_drvdata(desc->dev);

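	/*
	 * Program the MSI address and payload into the engine, so it
	 * can signal completions by writing the message data to the
	 * MSI doorbell address itself.
	 */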
	writel(msg->address_lo,
	       xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_BALR_OFF);
	writel(msg->address_hi & 0xFFFF,
	       xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_BAHR_OFF);
	writel(msg->data,
	       xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_CDAT_OFF);
}

static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
{
	u32 reg;

	/* write the DESQ size to the DMA engine */
	writel(MV_XOR_V2_DESC_NUM,
	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_SIZE_OFF);

	/* write the DESQ address to the DMA engine */
	writel(xor_dev->hw_desq & 0xFFFFFFFF,
	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BALR_OFF);
	writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32,
	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);

	/*
	 * This is a temporary solution, until we activate the
	 * SMMU. Set the attributes for reading & writing data buffers
	 * & descriptors to:
	 *
	 *  - OuterShareable - Snoops will be performed on CPU caches
	 *  - Enable cacheable - Bufferable, Modifiable, Other Allocate
	 *    and Allocate
	 */
	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ARATTR_OFF);
	reg &= ~MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK;
	reg |= MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE |
		MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE;
	writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ARATTR_OFF);

	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_AWATTR_OFF);
	reg &= ~MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK;
	reg |= MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE |
		MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE;
	writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_AWATTR_OFF);

	/* BW CTRL - set values to optimize the XOR performance:
	 *
	 *  - Set WrBurstLen & RdBurstLen - the unit will issue a
	 *    maximum of 256B write/read transactions.
	 *  - Limit the number of outstanding write & read data
	 *    (OBB/IBB) requests to the maximal value.
	 */
	reg = ((MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_VAL <<
		MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_SHIFT) |
	       (MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_VAL <<
		MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_SHIFT) |
	       (MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_VAL <<
		MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_SHIFT) |
	       (MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_VAL <<
		MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_SHIFT));
	writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_BW_CTRL);

	/* Disable the AXI timer feature */
	reg = readl(xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
	reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL;
	writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);

	/* enable the DMA engine */
	writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);

	return 0;
}

static int mv_xor_v2_suspend(struct platform_device *dev, pm_message_t state)
{
	struct mv_xor_v2_device *xor_dev = platform_get_drvdata(dev);

	/* Set the DESQ stop bit to disable the XOR unit */
	writel(0x1, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);

	return 0;
}

static int mv_xor_v2_resume(struct platform_device *dev)
{
	struct mv_xor_v2_device *xor_dev = platform_get_drvdata(dev);

	mv_xor_v2_set_desc_size(xor_dev);
	mv_xor_v2_enable_imsg_thrd(xor_dev);
	mv_xor_v2_descq_init(xor_dev);

	return 0;
}

static int mv_xor_v2_probe(struct platform_device *pdev)
{
	struct mv_xor_v2_device *xor_dev;
	struct resource *res;
	int i, ret = 0;
	struct dma_device *dma_dev;
	struct mv_xor_v2_sw_desc *sw_desc;
	struct msi_desc *msi_desc;

	BUILD_BUG_ON(sizeof(struct mv_xor_v2_descriptor) !=
		     MV_XOR_V2_EXT_DESC_SIZE);

	xor_dev = devm_kzalloc(&pdev->dev, sizeof(*xor_dev), GFP_KERNEL);
	if (!xor_dev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xor_dev->dma_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(xor_dev->dma_base))
		return PTR_ERR(xor_dev->dma_base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	xor_dev->glob_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(xor_dev->glob_base))
		return PTR_ERR(xor_dev->glob_base);

	platform_set_drvdata(pdev, xor_dev);

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
	if (ret)
		return ret;

	xor_dev->reg_clk = devm_clk_get(&pdev->dev, "reg");
	if (PTR_ERR(xor_dev->reg_clk) != -ENOENT) {
		if (!IS_ERR(xor_dev->reg_clk)) {
			ret = clk_prepare_enable(xor_dev->reg_clk);
			if (ret)
				return ret;
		} else {
			return PTR_ERR(xor_dev->reg_clk);
		}
	}

	xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto disable_reg_clk;
	}
	if (!IS_ERR(xor_dev->clk)) {
		ret = clk_prepare_enable(xor_dev->clk);
		if (ret)
			goto disable_reg_clk;
	}

	ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1,
					     mv_xor_v2_set_msi_msg);
	if (ret)
		goto disable_clk;

	msi_desc = first_msi_entry(&pdev->dev);
	if (!msi_desc) {
		ret = -ENODEV;
		goto free_msi_irqs;
	}

	ret = devm_request_irq(&pdev->dev, msi_desc->irq,
			       mv_xor_v2_interrupt_handler, 0,
			       dev_name(&pdev->dev), xor_dev);
	if (ret)
		goto free_msi_irqs;

	tasklet_init(&xor_dev->irq_tasklet, mv_xor_v2_tasklet,
		     (unsigned long) xor_dev);

	xor_dev->desc_size = mv_xor_v2_set_desc_size(xor_dev);

	dma_cookie_init(&xor_dev->dmachan);

	/*
	 * allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	xor_dev->hw_desq_virt =
		dma_alloc_coherent(&pdev->dev,
				   xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
				   &xor_dev->hw_desq, GFP_KERNEL);
	if (!xor_dev->hw_desq_virt) {
		ret = -ENOMEM;
		goto free_msi_irqs;
	}

	/* alloc memory for the SW descriptors */
	xor_dev->sw_desq = devm_kzalloc(&pdev->dev, sizeof(*sw_desc) *
					MV_XOR_V2_DESC_NUM, GFP_KERNEL);
	if (!xor_dev->sw_desq) {
		ret = -ENOMEM;
		goto free_hw_desq;
	}

	spin_lock_init(&xor_dev->lock);

	/* init the free SW descriptors list */
	INIT_LIST_HEAD(&xor_dev->free_sw_desc);

	/* add all SW descriptors to the free list */
	for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) {
		struct mv_xor_v2_sw_desc *sw_desc =
			xor_dev->sw_desq + i;
		sw_desc->idx = i;
		dma_async_tx_descriptor_init(&sw_desc->async_tx,
					     &xor_dev->dmachan);
		sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
		async_tx_ack(&sw_desc->async_tx);

		list_add(&sw_desc->free_list,
			 &xor_dev->free_sw_desc);
	}

	dma_dev = &xor_dev->dmadev;

	/* set DMA capabilities */
	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_XOR, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

	/* init dma link list */
	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = mv_xor_v2_issue_pending;
	dma_dev->dev = &pdev->dev;

	dma_dev->device_prep_dma_memcpy = mv_xor_v2_prep_dma_memcpy;
	dma_dev->device_prep_dma_interrupt = mv_xor_v2_prep_dma_interrupt;
	dma_dev->max_xor = 8;
	dma_dev->device_prep_dma_xor = mv_xor_v2_prep_dma_xor;

	xor_dev->dmachan.device = dma_dev;

	list_add_tail(&xor_dev->dmachan.device_node,
		      &dma_dev->channels);

	mv_xor_v2_enable_imsg_thrd(xor_dev);

	mv_xor_v2_descq_init(xor_dev);

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto free_hw_desq;

	dev_notice(&pdev->dev, "Marvell Version 2 XOR driver\n");

	return 0;

free_hw_desq:
	dma_free_coherent(&pdev->dev,
			  xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
			  xor_dev->hw_desq_virt, xor_dev->hw_desq);
free_msi_irqs:
	platform_msi_domain_free_irqs(&pdev->dev);
disable_clk:
	clk_disable_unprepare(xor_dev->clk);
disable_reg_clk:
	clk_disable_unprepare(xor_dev->reg_clk);
	return ret;
}

static int mv_xor_v2_remove(struct platform_device *pdev)
{
	struct mv_xor_v2_device *xor_dev = platform_get_drvdata(pdev);

	dma_async_device_unregister(&xor_dev->dmadev);

	dma_free_coherent(&pdev->dev,
			  xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
			  xor_dev->hw_desq_virt, xor_dev->hw_desq);

	platform_msi_domain_free_irqs(&pdev->dev);

	tasklet_kill(&xor_dev->irq_tasklet);

	clk_disable_unprepare(xor_dev->clk);
	clk_disable_unprepare(xor_dev->reg_clk);

	return 0;
}

#ifdef CONFIG_OF
static const struct of_device_id mv_xor_v2_dt_ids[] = {
	{ .compatible = "marvell,xor-v2", },
	{},
};
MODULE_DEVICE_TABLE(of, mv_xor_v2_dt_ids);
#endif

static struct platform_driver mv_xor_v2_driver = {
	.probe		= mv_xor_v2_probe,
	.suspend	= mv_xor_v2_suspend,
	.resume		= mv_xor_v2_resume,
	.remove		= mv_xor_v2_remove,
	.driver		= {
		.name	= "mv_xor_v2",
		.of_match_table = of_match_ptr(mv_xor_v2_dt_ids),
	},
};

module_platform_driver(mv_xor_v2_driver);

MODULE_DESCRIPTION("DMA engine driver for Marvell's Version 2 of XOR engine");
MODULE_LICENSE("GPL");