/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/cpumask.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

enum mv_xor_type {
	XOR_ORION,
	XOR_ARMADA_38X,
};

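/*
 * Older Orion-family engines take the operation (XOR vs. MEMCPY) from the
 * per-channel configuration register, while Armada 38x engines can carry
 * it in each hardware descriptor; this enum records which scheme a given
 * channel uses (see mv_chan_set_mode() and mv_desc_set_mode() below).
 */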
enum mv_xor_mode {
	XOR_MODE_IN_REG,
	XOR_MODE_IN_DESC,
};

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)		\
	((chan)->dmadev.dev)

static void mv_desc_init(struct mv_xor_desc_slot *desc,
			 dma_addr_t addr, u32 byte_count,
			 enum dma_ctrl_flags flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = XOR_DESC_DMA_OWNED;
	hw_desc->phy_next_desc = 0;
	/* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
	hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
				XOR_DESC_EOD_INT_EN : 0;
	hw_desc->phy_dest_addr = addr;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	switch (desc->type) {
	case DMA_XOR:
	case DMA_INTERRUPT:
		hw_desc->desc_command |= XOR_DESC_OPERATION_XOR;
		break;
	case DMA_MEMCPY:
		hw_desc->desc_command |= XOR_DESC_OPERATION_MEMCPY;
		break;
	default:
		BUG();
		return;
	}
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return readl_relaxed(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static void mv_chan_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val;

	val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
	val = ~(val << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_chan_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_chan_set_mode(struct mv_xor_chan *chan,
			     u32 op_mode)
{
	u32 config = readl_relaxed(XOR_CONFIG(chan));

	config &= ~0x7;
	config |= op_mode;

#if defined(__BIG_ENDIAN)
	config |= XOR_DESCRIPTOR_SWAP;
#else
	config &= ~XOR_DESCRIPTOR_SWAP;
#endif

	writel_relaxed(config, XOR_CONFIG(chan));
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");

	/* writel ensures all descriptors are flushed before activation */
	writel(BIT(0), XOR_ACTIVATION(chan));
}

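/*
 * Bits [5:4] of the activation register report the channel state; as used
 * here, a value of 1 means the channel is actively processing a descriptor
 * chain (other encodings are not inspected by this driver).
 */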
static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = readl_relaxed(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

/*
 * mv_chan_start_new_chain - program the engine to operate on new
 * chain headed by sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_chan_start_new_chain(struct mv_xor_chan *mv_chan,
				    struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);

	/* set the hardware chain */
	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);

	mv_chan->pending++;
	mv_xor_issue_pending(&mv_chan->dmachan);
}

static dma_cookie_t
mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
				struct mv_xor_chan *mv_chan,
				dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		dma_descriptor_unmap(&desc->async_tx);
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 node) {

		if (async_tx_test_ack(&iter->async_tx))
			list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	return 0;
}

static int
mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
		   struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);

	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx))
		/* move this slot to the completed_slots */
		list_move_tail(&desc->node, &mv_chan->completed_slots);
	else
		list_move_tail(&desc->node, &mv_chan->free_slots);

	return 0;
}

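/*
 * Walk the chain oldest-first, retiring every descriptor the engine has
 * marked XOR_DESC_SUCCESS; if the engine has stopped mid-chain, restart
 * it on the first descriptor that still needs processing.
 */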
/* This function must be called with the mv_xor_chan spinlock held */
static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int current_cleaned = 0;
	struct mv_xor_desc *hw_desc;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_chan_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 node) {

		/* clean finished descriptors */
		hw_desc = iter->hw_desc;
		if (hw_desc->status & XOR_DESC_SUCCESS) {
			cookie = mv_desc_run_tx_complete_actions(iter, mv_chan,
								 cookie);

			/* done processing desc, clean slot */
			mv_desc_clean_slot(iter, mv_chan);

			/* break if we cleaned the current descriptor */
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 1;
				break;
			}
		} else {
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 0;
				break;
			}
		}
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		if (current_cleaned) {
			/*
			 * current descriptor cleaned and removed, run
			 * from list head
			 */
			iter = list_entry(mv_chan->chain.next,
					  struct mv_xor_desc_slot,
					  node);
			mv_chan_start_new_chain(mv_chan, iter);
		} else {
			if (!list_is_last(&iter->node, &mv_chan->chain)) {
				/*
				 * descriptors are still waiting after
				 * current, trigger them
				 */
				iter = list_entry(iter->node.next,
						  struct mv_xor_desc_slot,
						  node);
				mv_chan_start_new_chain(mv_chan, iter);
			} else {
				/*
				 * some descriptors are still waiting
				 * to be cleaned
				 */
				tasklet_schedule(&mv_chan->irq_tasklet);
			}
		}
	}

	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;

	spin_lock_bh(&chan->lock);
	mv_chan_slot_cleanup(chan);
	spin_unlock_bh(&chan->lock);
}

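/*
 * Slot lifecycle: descriptors move from free_slots to allocated_slots when
 * prepared, onto the hardware chain when submitted, then to completed_slots
 * (or straight back to free_slots once acked) during cleanup.
 */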
static struct mv_xor_desc_slot *
mv_chan_alloc_slot(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter;

	spin_lock_bh(&mv_chan->lock);

	if (!list_empty(&mv_chan->free_slots)) {
		iter = list_first_entry(&mv_chan->free_slots,
					struct mv_xor_desc_slot,
					node);

		list_move_tail(&iter->node, &mv_chan->allocated_slots);

		spin_unlock_bh(&mv_chan->lock);

		/* pre-ack descriptor */
		async_tx_ack(&iter->async_tx);
		iter->async_tx.cookie = -EBUSY;

		return iter;

	}

	spin_unlock_bh(&mv_chan->lock);

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

/************************ DMA engine API functions ****************************/
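
/*
 * Illustrative sketch (not part of this driver) of how a dmaengine client
 * drives one of these channels, mirroring what the self-tests below do;
 * DMA mapping and error handling are elided:
 *
 *	tx = chan->device->device_prep_dma_xor(chan, dest_dma, src_dmas,
 *					       src_cnt, len, 0);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 */
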
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_move_tail(&sw_desc->node, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    node);
		list_move_tail(&sw_desc->node, &mv_chan->chain);

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
			&old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_chan_start_new_chain(mv_chan, sw_desc);

	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	void *virt_desc;
	dma_addr_t dma_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			dev_info(mv_chan_to_devp(mv_chan),
				 "channel only initialized %d descriptor slots",
				 idx);
			break;
		}
		virt_desc = mv_chan->dma_desc_pool_virt;
		slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->node);
		dma_desc = mv_chan->dma_desc_pool;
		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->node, &mv_chan->free_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots\n",
		mv_chan->slots_allocated);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
		__func__, src_cnt, len, &dest, flags);

	sw_desc = mv_chan_alloc_slot(mv_chan);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		mv_desc_init(sw_desc, dest, len, flags);
		if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
			mv_desc_set_mode(sw_desc);
		while (src_cnt--)
			mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
	}

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	/*
	 * A MEMCPY operation is identical to an XOR operation with only
	 * a single source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	dma_addr_t src, dest;
	size_t len;

	src = mv_chan->dummy_src_addr;
	dest = mv_chan->dummy_dst_addr;
	len = MV_XOR_MIN_BYTE_COUNT;

	/*
	 * We implement the DMA_INTERRUPT operation as a minimum sized
	 * XOR operation with a single dummy source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	spin_lock_bh(&mv_chan->lock);

	mv_chan_slot_cleanup(mv_chan);

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->allocated_slots,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->free_slots, node) {
		list_del(&iter->node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transaction state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_bh(&mv_chan->lock);
	mv_chan_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);

	return dma_cookie_status(chan, cookie, txstate);
}

static void mv_chan_dump_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = readl_relaxed(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);

	val = readl_relaxed(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
}

static void mv_chan_err_interrupt_handler(struct mv_xor_chan *chan,
					  u32 intr_cause)
{
	if (intr_cause & XOR_INT_ERR_DECODE) {
		dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
		chan->idx, intr_cause);

	mv_chan_dump_regs(chan);
	WARN_ON(1);
}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (intr_cause & XOR_INTR_ERRORS)
		mv_chan_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_chan_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

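/*
 * Activation is batched: the engine is only kicked once the number of
 * pending submissions reaches MV_XOR_THRESHOLD, at which point the whole
 * queued chain is started in one go.
 */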
static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */
static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
	int i, ret;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	int err = 0;

	src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < PAGE_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
			       PAGE_SIZE, DMA_TO_DEVICE);
	unmap->addr[0] = src_dma;

	ret = dma_mapping_error(dma_chan->device->dev, src_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->to_cnt = 1;

	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
				PAGE_SIZE, DMA_FROM_DEVICE);
	unmap->addr[1] = dest_dma;

	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, PAGE_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
mv_chan_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx, ret;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	int src_count = MV_XOR_NUM_SRC_TEST;

	for (src_idx = 0; src_idx < src_count; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < src_count; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < src_count; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
					 GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	/* test xor */
	for (i = 0; i < src_count; i++) {
		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					      0, PAGE_SIZE, DMA_TO_DEVICE);
		dma_srcs[i] = unmap->addr[i];
		ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
		if (ret) {
			err = -ENOMEM;
			goto free_resources;
		}
		unmap->to_cnt++;
	}

	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
					      DMA_FROM_DEVICE);
	dest_dma = unmap->addr[src_count];
	ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 src_count, PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
				i, ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = src_count;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	dma_unmap_single(dev, mv_chan->dummy_src_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	dma_unmap_single(dev, mv_chan->dummy_dst_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	free_irq(mv_chan->irq, mv_chan);

	return 0;
}

static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
		   struct platform_device *pdev,
		   int idx, dma_cap_mask_t cap_mask, int irq)
{
	int ret = 0;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan)
		return ERR_PTR(-ENOMEM);

	mv_chan->idx = idx;
	mv_chan->irq = irq;
	if (xordev->xor_type == XOR_ORION)
		mv_chan->op_in_desc = XOR_MODE_IN_REG;
	else
		mv_chan->op_in_desc = XOR_MODE_IN_DESC;

	dma_dev = &mv_chan->dmadev;

	/*
	 * These source and destination dummy buffers are used to implement
	 * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
	 * Hence, we only need to map the buffers at initialization-time.
	 */
	mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	mv_chan->dma_desc_pool_virt =
	  dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
		       GFP_KERNEL);
	if (!mv_chan->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = cap_mask;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan->mmr_base = xordev->xor_base;
	mv_chan->mmr_high_base = xordev->xor_high_base;
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_chan_clear_err_status(mv_chan);

	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
			  0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_IN_DESC);
	else
		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_XOR);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->free_slots);
	INIT_LIST_HEAD(&mv_chan->allocated_slots);
	mv_chan->dmachan.device = dma_dev;
	dma_cookie_init(&mv_chan->dmachan);

	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_chan_memcpy_self_test(mv_chan);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_chan_xor_self_test(mv_chan);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
		 mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	return mv_chan;

err_free_irq:
	free_irq(mv_chan->irq, mv_chan);
err_free_dma:
	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	return ERR_PTR(ret);
}

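/*
 * Program the engine's address decoding windows from the MBUS DRAM
 * layout: all eight windows are first cleared, then one window per DRAM
 * chip select is programmed and enabled via the BAR-enable registers.
 */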
static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
			 const struct mbus_dram_target_info *dram)
{
	void __iomem *base = xordev->xor_high_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}

/*
 * Since this XOR driver is basically used only for RAID5, we don't
 * need to care about synchronizing ->suspend with DMA activity,
 * because the DMA engine will naturally be quiet due to the block
 * devices being suspended.
 */
static int mv_xor_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		struct mv_xor_chan *mv_chan = xordev->channels[i];

		if (!mv_chan)
			continue;

		mv_chan->saved_config_reg =
			readl_relaxed(XOR_CONFIG(mv_chan));
		mv_chan->saved_int_mask_reg =
			readl_relaxed(XOR_INTR_MASK(mv_chan));
	}

	return 0;
}

static int mv_xor_resume(struct platform_device *dev)
{
	struct mv_xor_device *xordev = platform_get_drvdata(dev);
	const struct mbus_dram_target_info *dram;
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		struct mv_xor_chan *mv_chan = xordev->channels[i];

		if (!mv_chan)
			continue;

		writel_relaxed(mv_chan->saved_config_reg,
			       XOR_CONFIG(mv_chan));
		writel_relaxed(mv_chan->saved_int_mask_reg,
			       XOR_INTR_MASK(mv_chan));
	}

	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(xordev, dram);

	return 0;
}

static const struct of_device_id mv_xor_dt_ids[] = {
	{ .compatible = "marvell,orion-xor", .data = (void *)XOR_ORION },
	{ .compatible = "marvell,armada-380-xor", .data = (void *)XOR_ARMADA_38X },
	{},
};

static unsigned int mv_xor_engine_count;

static int mv_xor_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_device *xordev;
	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct resource *res;
	unsigned int max_engines, max_channels;
	int i, ret;

	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
	if (!xordev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!xordev->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
	if (!xordev->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, xordev);

	/*
	 * We need to know which type of XOR device we use before
	 * setting up. In non-dt case it can only be the legacy one.
	 */
	xordev->xor_type = XOR_ORION;
	if (pdev->dev.of_node) {
		const struct of_device_id *of_id =
			of_match_device(mv_xor_dt_ids,
					&pdev->dev);

		xordev->xor_type = (uintptr_t)of_id->data;
	}

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(xordev, dram);

	/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
	 */
	xordev->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(xordev->clk))
		clk_prepare_enable(xordev->clk);

	/*
	 * We don't want to have more than one channel per CPU in
	 * order for async_tx to perform well. So we limit the number
	 * of engines and channels so that we take into account this
	 * constraint. Note that we also want to use channels from
	 * separate engines when possible.
	 */
	max_engines = num_present_cpus();
	max_channels = min_t(unsigned int,
			     MV_XOR_MAX_CHANNELS,
			     DIV_ROUND_UP(num_present_cpus(), 2));

	if (mv_xor_engine_count >= max_engines)
		return 0;

	if (pdev->dev.of_node) {
		struct device_node *np;
		int i = 0;

		for_each_child_of_node(pdev->dev.of_node, np) {
			struct mv_xor_chan *chan;
			dma_cap_mask_t cap_mask;
			int irq;

			if (i >= max_channels)
				continue;

			dma_cap_zero(cap_mask);
			dma_cap_set(DMA_MEMCPY, cap_mask);
			dma_cap_set(DMA_XOR, cap_mask);
			dma_cap_set(DMA_INTERRUPT, cap_mask);

			irq = irq_of_parse_and_map(np, 0);
			if (!irq) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				irq_dispose_mapping(irq);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
			i++;
		}
	} else if (pdata && pdata->channels) {
		for (i = 0; i < max_channels; i++) {
			struct mv_xor_channel_data *cd;
			struct mv_xor_chan *chan;
			int irq;

			cd = &pdata->channels[i];
			if (!cd) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			irq = platform_get_irq(pdev, i);
			if (irq < 0) {
				ret = irq;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cd->cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
		}
	}

	return 0;

err_channel_add:
	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (xordev->channels[i]) {
			mv_xor_channel_remove(xordev->channels[i]);
			if (pdev->dev.of_node)
				irq_dispose_mapping(xordev->channels[i]->irq);
		}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return ret;
}

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.suspend	= mv_xor_suspend,
	.resume		= mv_xor_resume,
	.driver		= {
		.name		= MV_XOR_NAME,
		.of_match_table	= of_match_ptr(mv_xor_dt_ids),
	},
};

static int __init mv_xor_init(void)
{
	return platform_driver_register(&mv_xor_driver);
}
device_initcall(mv_xor_init);

/*
MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");
*/