/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)		\
	((chan)->dmadev.dev)
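
/*
 * Initialize the hardware part of a descriptor slot: status bit 31
 * marks the descriptor as owned by the XOR engine, the next-descriptor
 * pointer is cleared, and command bit 31 requests an end-of-descriptor
 * interrupt (per the descriptor layout in mv_xor.h).
 */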
static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = (1 << 31);
	hw_desc->phy_next_desc = 0;
	hw_desc->desc_command = (1 << 31);
}

static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_dest_addr;
}

static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
				int src_idx)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_src_addr[mv_phy_src_idx(src_idx)];
}

static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
				   u32 byte_count)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_next_desc = 0;
}

static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
				  dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_dest_addr = addr;
}

static int mv_chan_memset_slot_count(size_t len)
{
	return 1;
}

#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return readl_relaxed(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static int mv_is_err_intr(u32 intr_cause)
{
	if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
		return 1;

	return 0;
}

static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val = ~(1 << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static int mv_can_chain(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc_slot *chain_old_tail = list_entry(
		desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);

	if (chain_old_tail->type != desc->type)
		return 0;

	return 1;
}
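
/*
 * Program the channel's operation mode (XOR or memcpy) into the low
 * bits of the configuration register. On big-endian hosts the engine
 * is also told to byte-swap descriptors so that the layout written by
 * the CPU matches what the hardware expects.
 */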
static void mv_set_mode(struct mv_xor_chan *chan,
			enum dma_transaction_type type)
{
	u32 op_mode;
	u32 config = readl_relaxed(XOR_CONFIG(chan));

	switch (type) {
	case DMA_XOR:
		op_mode = XOR_OPERATION_MODE_XOR;
		break;
	case DMA_MEMCPY:
		op_mode = XOR_OPERATION_MODE_MEMCPY;
		break;
	default:
		dev_err(mv_chan_to_devp(chan),
			"error: unsupported operation %d\n",
			type);
		BUG();
		return;
	}

	config &= ~0x7;
	config |= op_mode;

#if defined(__BIG_ENDIAN)
	config |= XOR_DESCRIPTOR_SWAP;
#else
	config &= ~XOR_DESCRIPTOR_SWAP;
#endif

	writel_relaxed(config, XOR_CONFIG(chan));
	chan->current_type = type;
}
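
/*
 * Channel activation and status: writing bit 0 of the activation
 * register kicks the engine, and bits [5:4] report the channel state,
 * where a value of 1 is treated as busy.
 */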
static void mv_chan_activate(struct mv_xor_chan *chan)
{
	u32 activation;

	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
	activation = readl_relaxed(XOR_ACTIVATION(chan));
	activation |= 0x1;
	writel_relaxed(activation, XOR_ACTIVATION(chan));
}

static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = readl_relaxed(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

static int mv_chan_xor_slot_count(size_t len, int src_cnt)
{
	return 1;
}

/**
 * mv_xor_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
			      struct mv_xor_desc_slot *slot)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
		__func__, __LINE__, slot);

	slot->slots_per_op = 0;
}

/*
 * mv_xor_start_new_chain - program the engine to operate on new chain headed by
 * sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
				   struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);
	if (sw_desc->type != mv_chan->current_type)
		mv_set_mode(mv_chan, sw_desc->type);

	/* set the hardware chain */
	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);

	mv_chan->pending += sw_desc->slot_cnt;
	mv_xor_issue_pending(&mv_chan->dmachan);
}
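
/*
 * Run the completion actions for a finished descriptor: invoke the
 * client's callback, unmap the buffers that were mapped for the
 * transfer (unless the submitter asked to skip the unmap), and kick
 * any dependent operations. Returns the descriptor's cookie so the
 * caller can advance the channel's completed cookie.
 */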
static dma_cookie_t
mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
	struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		dma_descriptor_unmap(&desc->async_tx);
		/* unmap dma addresses
		 * (unmap_single vs unmap_page?)
		 */
		if (desc->group_head && desc->unmap_len) {
			struct mv_xor_desc_slot *unmap = desc->group_head;
			struct device *dev = mv_chan_to_devp(mv_chan);
			u32 len = unmap->unmap_len;
			enum dma_ctrl_flags flags = desc->async_tx.flags;
			u32 src_cnt;
			dma_addr_t addr;
			dma_addr_t dest;

			src_cnt = unmap->unmap_src_cnt;
			dest = mv_desc_get_dest_addr(unmap);
			if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
				enum dma_data_direction dir;

				if (src_cnt > 1) /* is xor ? */
					dir = DMA_BIDIRECTIONAL;
				else
					dir = DMA_FROM_DEVICE;
				dma_unmap_page(dev, dest, len, dir);
			}

			if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
				while (src_cnt--) {
					addr = mv_desc_get_src_addr(unmap,
								    src_cnt);
					if (addr == dest)
						continue;
					dma_unmap_page(dev, addr, len,
						       DMA_TO_DEVICE);
				}
			}
			desc->group_head = NULL;
		}
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_del(&iter->completed_node);
			mv_xor_free_slots(mv_chan, iter);
		}
	}
	return 0;
}

static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
	struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);
	list_del(&desc->chain_node);
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
		return 0;
	}

	mv_xor_free_slots(mv_chan, desc);
	return 0;
}
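
/*
 * Walk the channel's descriptor chain from the oldest entry, running
 * completion actions for every descriptor the hardware has finished.
 * The walk stops at the descriptor currently loaded into the channel;
 * if the channel has gone idle with work still queued, the remaining
 * chain is restarted.
 */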
static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int seen_current = 0;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_xor_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (iter->async_tx.phys == current_desc) {
			seen_current = 1;
			if (busy)
				break;
		}

		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);

		if (mv_xor_clean_slot(iter, mv_chan))
			break;
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		struct mv_xor_desc_slot *chain_head;
		chain_head = list_entry(mv_chan->chain.next,
					struct mv_xor_desc_slot,
					chain_node);

		mv_xor_start_new_chain(mv_chan, chain_head);
	}

	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}

static void
mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	spin_lock_bh(&mv_chan->lock);
	__mv_xor_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
	mv_xor_slot_cleanup(chan);
}
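
/*
 * Carve num_slots contiguous software descriptors out of the channel's
 * slot list. The search resumes after the last allocation and wraps to
 * the head of the list once; if no run is found, the cleanup tasklet
 * is scheduled in the hope of recycling completed slots.
 */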
static struct mv_xor_desc_slot *
mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
		   int slots_per_op)
{
	struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
	LIST_HEAD(chain);
	int slots_found, retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation can not be found start searching
	 * from the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = mv_chan->last_used;
	else
		iter = list_entry(&mv_chan->all_slots,
			struct mv_xor_desc_slot,
			slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;

			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++)
			alloc_start = iter;

		if (slots_found == num_slots) {
			struct mv_xor_desc_slot *alloc_tail = NULL;
			struct mv_xor_desc_slot *last_used = NULL;
			iter = alloc_start;
			while (num_slots) {
				int i;

				/* pre-ack all but the last descriptor */
				async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
						struct mv_xor_desc_slot,
						slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->tx_list);
			mv_chan->last_used = last_used;
			mv_desc_clear_next_desc(alloc_start);
			mv_desc_clear_next_desc(alloc_tail);
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

/************************ DMA engine API functions ****************************/
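
/*
 * Hand a prepared descriptor chain to the channel. If the channel's
 * chain is empty, the new descriptors start a fresh hardware chain;
 * otherwise they are appended to the tail, and the engine is only
 * restarted if it already stopped at the previous tail before the
 * append was visible.
 */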
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *grp_start, *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	grp_start = sw_desc->group_head;

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    chain_node);
		list_splice_init(&grp_start->tx_list,
				 &old_chain_tail->chain_node);

		if (!mv_can_chain(grp_start))
			goto submit_done;

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n",
			old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_xor_start_new_chain(mv_chan, grp_start);

submit_done:
	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	char *hw_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			printk(KERN_INFO "MV XOR Channel only initialized %d descriptor slots\n",
			       idx);
			break;
		}
		hw_desc = (char *) mv_chan->dma_desc_pool_virt;
		slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		INIT_LIST_HEAD(&slot->tx_list);
		hw_desc = (char *) mv_chan->dma_desc_pool;
		slot->async_tx.phys =
			(dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->slot_node, &mv_chan->all_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	if (mv_chan->slots_allocated && !mv_chan->last_used)
		mv_chan->last_used = list_entry(mv_chan->all_slots.next,
						struct mv_xor_desc_slot,
						slot_node);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots last_used: %p\n",
		mv_chan->slots_allocated, mv_chan->last_used);

	return mv_chan->slots_allocated ? : -ENOMEM;
}
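
/*
 * Prepare a single-descriptor memcpy. Transfers shorter than
 * MV_XOR_MIN_BYTE_COUNT are refused (NULL is returned) and transfers
 * longer than MV_XOR_MAX_BYTE_COUNT are a hard error, since the
 * engine handles the whole copy with one hardware descriptor.
 */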
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s dest: %x src %x len: %u flags: %ld\n",
		__func__, dest, src, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memcpy_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMCPY;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_src_addr(grp_start, 0, src);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);

	return sw_desc ? &sw_desc->async_tx : NULL;
}
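
/*
 * Prepare a single-descriptor XOR over src_cnt source buffers. Each
 * source address takes one slot in the hardware descriptor, and the
 * matching bit in the command field enables that source (see
 * mv_desc_set_src_addr).
 */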
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: %u dest %x flags: %ld\n",
		__func__, src_cnt, len, dest, flags);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		/* the byte count field is the same as in memcpy desc*/
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		while (src_cnt--)
			mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	mv_xor_slot_cleanup(mv_chan);

	spin_lock_bh(&mv_chan->lock);
	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {
		in_use_descs++;
		list_del(&iter->completed_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}
	mv_chan->last_used = NULL;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transaction state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS) {
		mv_xor_clean_completed_slots(mv_chan);
		return ret;
	}
	mv_xor_slot_cleanup(mv_chan);

	return dma_cookie_status(chan, cookie, txstate);
}

static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = readl_relaxed(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);

	val = readl_relaxed(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
}

static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
					 u32 intr_cause)
{
	if (intr_cause & (1 << 4)) {
		dev_dbg(mv_chan_to_devp(chan),
			"ignore this error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan),
		"error on chan %d. intr cause 0x%08x\n",
		chan->idx, intr_cause);

	mv_dump_xor_regs(chan);
	BUG();
}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (mv_is_err_intr(intr_cause))
		mv_xor_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_xor_device_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}
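
/*
 * Flush pending work to the hardware. Activation is batched: the
 * channel is only kicked once at least MV_XOR_THRESHOLD descriptors
 * have accumulated since the last activation.
 */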
static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */
#define MV_XOR_TEST_SIZE 2000

static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
	int i;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	int err = 0;

	src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < MV_XOR_TEST_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	dest_dma = dma_map_single(dma_chan->device->dev, dest,
				  MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);

	src_dma = dma_map_single(dma_chan->device->dev, src,
				 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    MV_XOR_TEST_SIZE, 0);
	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_SUCCESS) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
				DMA_FROM_DEVICE);

	for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					   0, PAGE_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);

	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_SUCCESS) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
				i, ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = MV_XOR_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

/* This driver does not implement any of the optional DMA operations. */
static int
mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	       unsigned long arg)
{
	return -ENOSYS;
}

static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	free_irq(mv_chan->irq, mv_chan);

	return 0;
}
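
/*
 * Set up one XOR channel: allocate the coherent descriptor pool,
 * register dmaengine callbacks according to the capability mask, hook
 * up the interrupt, and run the memcpy/xor self-tests before
 * registering the channel with the dmaengine core.
 */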
static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
		   struct platform_device *pdev,
		   int idx, dma_cap_mask_t cap_mask, int irq)
{
	int ret = 0;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan)
		return ERR_PTR(-ENOMEM);

	mv_chan->idx = idx;
	mv_chan->irq = irq;

	dma_dev = &mv_chan->dmadev;

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	mv_chan->dma_desc_pool_virt =
	  dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
				 &mv_chan->dma_desc_pool, GFP_KERNEL);
	if (!mv_chan->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = cap_mask;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->device_control = mv_xor_control;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan->mmr_base = xordev->xor_base;
	if (!mv_chan->mmr_base) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_xor_device_clear_err_status(mv_chan);

	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
			  0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	mv_set_mode(mv_chan, DMA_MEMCPY);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->all_slots);
	mv_chan->dmachan.device = dma_dev;
	dma_cookie_init(&mv_chan->dmachan);

	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_xor_memcpy_self_test(mv_chan);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_xor_xor_self_test(mv_chan);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s)\n",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	return mv_chan;

err_free_irq:
	free_irq(mv_chan->irq, mv_chan);
err_free_dma:
	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	return ERR_PTR(ret);
}
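
/*
 * Program the XOR unit's MBUS address decoding windows so each DRAM
 * chip-select is reachable: window i covers chip-select i (base and
 * size aligned to 64 KiB), and the enable register opens the window
 * with full access for both channels of the unit.
 */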
static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
			 const struct mbus_dram_target_info *dram)
{
	void __iomem *base = xordev->xor_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}
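
/*
 * Probe the shared XOR unit: map both register ranges, program the
 * MBUS windows if the platform provides DRAM info, enable the
 * (optional) clock, then instantiate one channel per DT child node,
 * or per platform-data entry on non-DT platforms.
 */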
static int mv_xor_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_device *xordev;
	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct resource *res;
	int i, ret;

	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
	if (!xordev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!xordev->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
	if (!xordev->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, xordev);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(xordev, dram);

	/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
	 */
	xordev->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(xordev->clk))
		clk_prepare_enable(xordev->clk);

	if (pdev->dev.of_node) {
		struct device_node *np;
		int i = 0;

		for_each_child_of_node(pdev->dev.of_node, np) {
			dma_cap_mask_t cap_mask;
			int irq;

			dma_cap_zero(cap_mask);
			if (of_property_read_bool(np, "dmacap,memcpy"))
				dma_cap_set(DMA_MEMCPY, cap_mask);
			if (of_property_read_bool(np, "dmacap,xor"))
				dma_cap_set(DMA_XOR, cap_mask);
			if (of_property_read_bool(np, "dmacap,interrupt"))
				dma_cap_set(DMA_INTERRUPT, cap_mask);

			irq = irq_of_parse_and_map(np, 0);
			if (!irq) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			xordev->channels[i] =
				mv_xor_channel_add(xordev, pdev, i,
						   cap_mask, irq);
			if (IS_ERR(xordev->channels[i])) {
				ret = PTR_ERR(xordev->channels[i]);
				xordev->channels[i] = NULL;
				irq_dispose_mapping(irq);
				goto err_channel_add;
			}

			i++;
		}
	} else if (pdata && pdata->channels) {
		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
			struct mv_xor_channel_data *cd;
			int irq;

			cd = &pdata->channels[i];
			if (!cd) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			irq = platform_get_irq(pdev, i);
			if (irq < 0) {
				ret = irq;
				goto err_channel_add;
			}

			xordev->channels[i] =
				mv_xor_channel_add(xordev, pdev, i,
						   cd->cap_mask, irq);
			if (IS_ERR(xordev->channels[i])) {
				ret = PTR_ERR(xordev->channels[i]);
				goto err_channel_add;
			}
		}
	}

	return 0;

err_channel_add:
	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (xordev->channels[i]) {
			mv_xor_channel_remove(xordev->channels[i]);
			if (pdev->dev.of_node)
				irq_dispose_mapping(xordev->channels[i]->irq);
		}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return ret;
}

static int mv_xor_remove(struct platform_device *pdev)
{
	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		if (xordev->channels[i])
			mv_xor_channel_remove(xordev->channels[i]);
	}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return 0;
}

#ifdef CONFIG_OF
static struct of_device_id mv_xor_dt_ids[] = {
	{ .compatible = "marvell,orion-xor", },
	{},
};
MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
#endif

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.remove		= mv_xor_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= MV_XOR_NAME,
		.of_match_table = of_match_ptr(mv_xor_dt_ids),
	},
};

static int __init mv_xor_init(void)
{
	return platform_driver_register(&mv_xor_driver);
}
module_init(mv_xor_init);

/* it's currently unsafe to unload this module */
#if 0
static void __exit mv_xor_exit(void)
{
	platform_driver_unregister(&mv_xor_driver);
	return;
}

module_exit(mv_xor_exit);
#endif

MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");