/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Intel MIC X100 DMA Driver.
 *
 * Adapted from the IOAT dma driver.
 */
#include <linux/module.h>
#include <linux/io.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include "mic_x100_dma.h"

#define MIC_DMA_MAX_XFER_SIZE_CARD	(1 * 1024 * 1024 - \
					 MIC_DMA_ALIGN_BYTES)
#define MIC_DMA_MAX_XFER_SIZE_HOST	(1 * 1024 * 1024 >> 1)
#define MIC_DMA_DESC_TYPE_SHIFT		60
#define MIC_DMA_MEMCPY_LEN_SHIFT	46
#define MIC_DMA_STAT_INTR_SHIFT		59

/* high-water mark for pushing dma descriptors */
static int mic_dma_pending_level = 4;

/* Status descriptor is used to write a 64 bit value to a memory location */
enum mic_dma_desc_format_type {
	MIC_DMA_MEMCPY = 1,
	MIC_DMA_STATUS,
};

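/* Ring index helpers: the descriptor ring wraps at MIC_DMA_DESC_RX_SIZE */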
static inline u32 mic_dma_hw_ring_inc(u32 val)
{
	return (val + 1) % MIC_DMA_DESC_RX_SIZE;
}

static inline u32 mic_dma_hw_ring_dec(u32 val)
{
	return val ? val - 1 : MIC_DMA_DESC_RX_SIZE - 1;
}

static inline void mic_dma_hw_ring_inc_head(struct mic_dma_chan *ch)
{
	ch->head = mic_dma_hw_ring_inc(ch->head);
}

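/*
 * Descriptor layout: qw1 holds the descriptor type starting at bit 60 and
 * the destination address in its low bits. For a memcpy descriptor, qw0
 * holds the source address and the transfer length (in units of
 * MIC_DMA_ALIGN_BYTES) starting at bit 46; for a status descriptor, qw0
 * holds the 64 bit value to be written.
 */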
/* Prepare a memcpy desc */
static inline void mic_dma_memcpy_desc(struct mic_dma_desc *desc,
				       dma_addr_t src_phys, dma_addr_t dst_phys,
				       u64 size)
{
	u64 qw0, qw1;

	qw0 = src_phys;
	qw0 |= (size >> MIC_DMA_ALIGN_SHIFT) << MIC_DMA_MEMCPY_LEN_SHIFT;
	qw1 = MIC_DMA_MEMCPY;
	qw1 <<= MIC_DMA_DESC_TYPE_SHIFT;
	qw1 |= dst_phys;
	desc->qw0 = qw0;
	desc->qw1 = qw1;
}

/* Prepare a status desc. with @data to be written at @dst_phys */
static inline void mic_dma_prep_status_desc(struct mic_dma_desc *desc, u64 data,
					    dma_addr_t dst_phys,
					    bool generate_intr)
{
	u64 qw0, qw1;

	qw0 = data;
	qw1 = (u64)MIC_DMA_STATUS << MIC_DMA_DESC_TYPE_SHIFT | dst_phys;
	if (generate_intr)
		qw1 |= (1ULL << MIC_DMA_STAT_INTR_SHIFT);
	desc->qw0 = qw0;
	desc->qw1 = qw1;
}

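/*
 * Reclaim completed descriptors: walk the ring from the last reclaimed
 * tail up to the h/w completion count, completing cookies and invoking
 * client callbacks along the way.
 */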
static void mic_dma_cleanup(struct mic_dma_chan *ch)
{
	struct dma_async_tx_descriptor *tx;
	u32 tail;
	u32 last_tail;

	spin_lock(&ch->cleanup_lock);
	tail = mic_dma_read_cmp_cnt(ch);
	/*
	 * This is the barrier pair for smp_wmb() in fn.
	 * mic_dma_tx_submit_unlock. It's required so that we read the
	 * updated cookie value from tx->cookie.
	 */
	smp_rmb();
	for (last_tail = ch->last_tail; tail != last_tail;) {
		tx = &ch->tx_array[last_tail];
		if (tx->cookie) {
			dma_cookie_complete(tx);
			dmaengine_desc_get_callback_invoke(tx, NULL);
			tx->callback = NULL;
		}
		last_tail = mic_dma_hw_ring_inc(last_tail);
	}
	/* finish all completion callbacks before incrementing tail */
	smp_mb();
	ch->last_tail = last_tail;
	spin_unlock(&ch->cleanup_lock);
}

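/*
 * Number of free slots between head and tail; one slot is kept empty so
 * that a full ring can be distinguished from an empty one.
 */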
static u32 mic_dma_ring_count(u32 head, u32 tail)
{
	u32 count;

	if (head >= tail)
		count = tail + (MIC_DMA_DESC_RX_SIZE - head);
	else
		count = tail - head;
	return count - 1;
}

/* Returns the num. of free descriptors on success, -ENOMEM on failure */
static int mic_dma_avail_desc_ring_space(struct mic_dma_chan *ch, int required)
{
	struct device *dev = mic_dma_ch_to_device(ch);
	u32 count;

	count = mic_dma_ring_count(ch->head, ch->last_tail);
	if (count < required) {
		mic_dma_cleanup(ch);
		count = mic_dma_ring_count(ch->head, ch->last_tail);
	}

	if (count < required) {
		dev_dbg(dev, "Not enough desc space");
		dev_dbg(dev, "%s %d required=%u, avail=%u\n",
			__func__, __LINE__, required, count);
		return -ENOMEM;
	} else {
		return count;
	}
}

/* Program memcpy descriptors into the descriptor ring and update s/w head ptr */
static int mic_dma_prog_memcpy_desc(struct mic_dma_chan *ch, dma_addr_t src,
				    dma_addr_t dst, size_t len)
{
	size_t current_transfer_len;
	size_t max_xfer_size = to_mic_dma_dev(ch)->max_xfer_size;
	/* 3 is added to make sure we have enough space for status desc */
	int num_desc = len / max_xfer_size + 3;
	int ret;

	if (len % max_xfer_size)
		num_desc++;

	ret = mic_dma_avail_desc_ring_space(ch, num_desc);
	if (ret < 0)
		return ret;
	do {
		current_transfer_len = min(len, max_xfer_size);
		mic_dma_memcpy_desc(&ch->desc_ring[ch->head],
				    src, dst, current_transfer_len);
		mic_dma_hw_ring_inc_head(ch);
		len -= current_transfer_len;
		dst = dst + current_transfer_len;
		src = src + current_transfer_len;
	} while (len > 0);
	return 0;
}

/* It's a h/w quirk: the h/w needs 2 status descriptors for every status update */
static void mic_dma_prog_intr(struct mic_dma_chan *ch)
{
	mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0,
				 ch->status_dest_micpa, false);
	mic_dma_hw_ring_inc_head(ch);
	mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0,
				 ch->status_dest_micpa, true);
	mic_dma_hw_ring_inc_head(ch);
}

/* Wrapper function to program memcpy descriptors/status descriptors */
static int mic_dma_do_dma(struct mic_dma_chan *ch, int flags, dma_addr_t src,
			  dma_addr_t dst, size_t len)
{
	if (len && mic_dma_prog_memcpy_desc(ch, src, dst, len) == -ENOMEM) {
		return -ENOMEM;
	} else {
		/* 3 is the maximum number of status descriptors */
		int ret = mic_dma_avail_desc_ring_space(ch, 3);

		if (ret < 0)
			return ret;
	}

	/* Above mic_dma_prog_memcpy_desc() makes sure we have enough space */
	if (flags & DMA_PREP_FENCE) {
		mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0,
					 ch->status_dest_micpa, false);
		mic_dma_hw_ring_inc_head(ch);
	}

	if (flags & DMA_PREP_INTERRUPT)
		mic_dma_prog_intr(ch);

	return 0;
}

static inline void mic_dma_issue_pending(struct dma_chan *ch)
{
	struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);

	spin_lock(&mic_ch->issue_lock);
	/*
	 * Write to head triggers h/w to act on the descriptors.
	 * On MIC, writing the same head value twice causes
	 * a h/w error. On second write, h/w assumes we filled
	 * the entire ring & overwrote some of the descriptors.
	 */
	if (mic_ch->issued == mic_ch->submitted)
		goto out;
	mic_ch->issued = mic_ch->submitted;
	/*
	 * make descriptor updates visible before advancing head,
	 * this is purposefully not smp_wmb() since we are also
	 * publishing the descriptor updates to a dma device
	 */
	wmb();
	mic_dma_write_reg(mic_ch, MIC_DMA_REG_DHPR, mic_ch->issued);
out:
	spin_unlock(&mic_ch->issue_lock);
}

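/* Push submitted descriptors to the h/w once enough of them accumulate */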
static inline void mic_dma_update_pending(struct mic_dma_chan *ch)
{
	if (mic_dma_ring_count(ch->issued, ch->submitted)
			> mic_dma_pending_level)
		mic_dma_issue_pending(&ch->api_ch);
}

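/*
 * Assign a cookie, publish the new s/w head and drop the prep_lock taken
 * in the prep_* functions. The smp_wmb() here pairs with the smp_rmb()
 * in mic_dma_cleanup().
 */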
static dma_cookie_t mic_dma_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
{
	struct mic_dma_chan *mic_ch = to_mic_dma_chan(tx->chan);
	dma_cookie_t cookie;

	dma_cookie_assign(tx);
	cookie = tx->cookie;
	/*
	 * We need an smp write barrier here because another CPU might see
	 * an update to submitted and update h/w head even before we
	 * assigned a cookie to this tx.
	 */
	smp_wmb();
	mic_ch->submitted = mic_ch->head;
	spin_unlock(&mic_ch->prep_lock);
	mic_dma_update_pending(mic_ch);
	return cookie;
}

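/*
 * Initialize the tx descriptor in the slot of the last programmed
 * descriptor (head - 1), so that completion of that slot completes
 * the whole transaction.
 */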
static inline struct dma_async_tx_descriptor *
allocate_tx(struct mic_dma_chan *ch)
{
	u32 idx = mic_dma_hw_ring_dec(ch->head);
	struct dma_async_tx_descriptor *tx = &ch->tx_array[idx];

	dma_async_tx_descriptor_init(tx, &ch->api_ch);
	tx->tx_submit = mic_dma_tx_submit_unlock;
	return tx;
}

/* Program a status descriptor with dst as address and value to be written */
static struct dma_async_tx_descriptor *
mic_dma_prep_status_lock(struct dma_chan *ch, dma_addr_t dst, u64 src_val,
			 unsigned long flags)
{
	struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
	int result;

	spin_lock(&mic_ch->prep_lock);
	result = mic_dma_avail_desc_ring_space(mic_ch, 4);
	if (result < 0)
		goto error;
	mic_dma_prep_status_desc(&mic_ch->desc_ring[mic_ch->head], src_val, dst,
				 false);
	mic_dma_hw_ring_inc_head(mic_ch);
	result = mic_dma_do_dma(mic_ch, flags, 0, 0, 0);
	if (result < 0)
		goto error;

	return allocate_tx(mic_ch);
error:
	dev_err(mic_dma_ch_to_device(mic_ch),
		"Error enqueueing dma status descriptor, error=%d\n", result);
	spin_unlock(&mic_ch->prep_lock);
	return NULL;
}

/*
 * Prepare a memcpy descriptor to be added to the ring.
 * A temporary descriptor would add the extra overhead of being copied
 * into the ring, so we program the descriptor ring directly.
 */
static struct dma_async_tx_descriptor *
mic_dma_prep_memcpy_lock(struct dma_chan *ch, dma_addr_t dma_dest,
			 dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
	struct device *dev = mic_dma_ch_to_device(mic_ch);
	int result;

	if (!len && !flags)
		return NULL;

	spin_lock(&mic_ch->prep_lock);
	result = mic_dma_do_dma(mic_ch, flags, dma_src, dma_dest, len);
	if (result >= 0)
		return allocate_tx(mic_ch);
	dev_err(dev, "Error enqueueing dma, error=%d\n", result);
	spin_unlock(&mic_ch->prep_lock);
	return NULL;
}

static struct dma_async_tx_descriptor *
mic_dma_prep_interrupt_lock(struct dma_chan *ch, unsigned long flags)
{
	struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
	int ret;

	spin_lock(&mic_ch->prep_lock);
	ret = mic_dma_do_dma(mic_ch, flags, 0, 0, 0);
	if (!ret)
		return allocate_tx(mic_ch);
	spin_unlock(&mic_ch->prep_lock);
	return NULL;
}

/* Return the status of the transaction */
static enum dma_status
mic_dma_tx_status(struct dma_chan *ch, dma_cookie_t cookie,
		  struct dma_tx_state *txstate)
{
	struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);

	if (dma_cookie_status(ch, cookie, txstate) != DMA_COMPLETE)
		mic_dma_cleanup(mic_ch);

	return dma_cookie_status(ch, cookie, txstate);
}

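/*
 * Interrupt handling is split in two: the hard handler acks the h/w
 * interrupt and wakes the irq thread, which reclaims completed
 * descriptors outside of hard interrupt context.
 */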
static irqreturn_t mic_dma_thread_fn(int irq, void *data)
{
	mic_dma_cleanup((struct mic_dma_chan *)data);
	return IRQ_HANDLED;
}

static irqreturn_t mic_dma_intr_handler(int irq, void *data)
{
	struct mic_dma_chan *ch = ((struct mic_dma_chan *)data);

	mic_dma_ack_interrupt(ch);
	return IRQ_WAKE_THREAD;
}

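/*
 * Allocate the descriptor ring, map it for the device and allocate the
 * parallel array of tx descriptors used for cookie tracking.
 */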
static int mic_dma_alloc_desc_ring(struct mic_dma_chan *ch)
{
	u64 desc_ring_size = MIC_DMA_DESC_RX_SIZE * sizeof(*ch->desc_ring);
	struct device *dev = &to_mbus_device(ch)->dev;

	desc_ring_size = ALIGN(desc_ring_size, MIC_DMA_ALIGN_BYTES);
	ch->desc_ring = kzalloc(desc_ring_size, GFP_KERNEL);

	if (!ch->desc_ring)
		return -ENOMEM;

	ch->desc_ring_micpa = dma_map_single(dev, ch->desc_ring,
					     desc_ring_size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, ch->desc_ring_micpa))
		goto map_error;

	ch->tx_array = vzalloc(MIC_DMA_DESC_RX_SIZE * sizeof(*ch->tx_array));
	if (!ch->tx_array)
		goto tx_error;
	return 0;
tx_error:
	dma_unmap_single(dev, ch->desc_ring_micpa, desc_ring_size,
			 DMA_BIDIRECTIONAL);
map_error:
	kfree(ch->desc_ring);
	return -ENOMEM;
}

static void mic_dma_free_desc_ring(struct mic_dma_chan *ch)
{
	u64 desc_ring_size = MIC_DMA_DESC_RX_SIZE * sizeof(*ch->desc_ring);

	vfree(ch->tx_array);
	desc_ring_size = ALIGN(desc_ring_size, MIC_DMA_ALIGN_BYTES);
	dma_unmap_single(&to_mbus_device(ch)->dev, ch->desc_ring_micpa,
			 desc_ring_size, DMA_BIDIRECTIONAL);
	kfree(ch->desc_ring);
	ch->desc_ring = NULL;
}

static void mic_dma_free_status_dest(struct mic_dma_chan *ch)
{
	dma_unmap_single(&to_mbus_device(ch)->dev, ch->status_dest_micpa,
			 L1_CACHE_BYTES, DMA_BIDIRECTIONAL);
	kfree(ch->status_dest);
}

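/*
 * Allocate a per-channel scratch location that status descriptors
 * (fences and interrupt descriptors) write their 64 bit value to.
 */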
static int mic_dma_alloc_status_dest(struct mic_dma_chan *ch)
{
	struct device *dev = &to_mbus_device(ch)->dev;

	ch->status_dest = kzalloc(L1_CACHE_BYTES, GFP_KERNEL);
	if (!ch->status_dest)
		return -ENOMEM;
	ch->status_dest_micpa = dma_map_single(dev, ch->status_dest,
					       L1_CACHE_BYTES,
					       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, ch->status_dest_micpa)) {
		kfree(ch->status_dest);
		ch->status_dest = NULL;
		return -ENOMEM;
	}
	return 0;
}

static int mic_dma_check_chan(struct mic_dma_chan *ch)
{
	if (mic_dma_read_reg(ch, MIC_DMA_REG_DCHERR) ||
	    mic_dma_read_reg(ch, MIC_DMA_REG_DSTAT) & MIC_DMA_CHAN_QUIESCE) {
		mic_dma_disable_chan(ch);
		mic_dma_chan_mask_intr(ch);
		dev_err(mic_dma_ch_to_device(ch),
			"%s %d error setting up mic dma chan %d\n",
			__func__, __LINE__, ch->ch_num);
		return -EBUSY;
	}
	return 0;
}

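/*
 * Program the descriptor ring address into the channel, sync the s/w
 * head and tail with the h/w tail pointer and enable the channel.
 */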
static int mic_dma_chan_setup(struct mic_dma_chan *ch)
{
	if (MIC_DMA_CHAN_MIC == ch->owner)
		mic_dma_chan_set_owner(ch);
	mic_dma_disable_chan(ch);
	mic_dma_chan_mask_intr(ch);
	mic_dma_write_reg(ch, MIC_DMA_REG_DCHERRMSK, 0);
	mic_dma_chan_set_desc_ring(ch);
	ch->last_tail = mic_dma_read_reg(ch, MIC_DMA_REG_DTPR);
	ch->head = ch->last_tail;
	ch->issued = 0;
	mic_dma_chan_unmask_intr(ch);
	mic_dma_enable_chan(ch);
	return mic_dma_check_chan(ch);
}

static void mic_dma_chan_destroy(struct mic_dma_chan *ch)
{
	mic_dma_disable_chan(ch);
	mic_dma_chan_mask_intr(ch);
}

static void mic_dma_unregister_dma_device(struct mic_dma_device *mic_dma_dev)
{
	dma_async_device_unregister(&mic_dma_dev->dma_dev);
}

static int mic_dma_setup_irq(struct mic_dma_chan *ch)
{
	ch->cookie =
		to_mbus_hw_ops(ch)->request_threaded_irq(to_mbus_device(ch),
				mic_dma_intr_handler, mic_dma_thread_fn,
				"mic dma_channel", ch, ch->ch_num);
	if (IS_ERR(ch->cookie))
		return PTR_ERR(ch->cookie);
	return 0;
}

static inline void mic_dma_free_irq(struct mic_dma_chan *ch)
{
	to_mbus_hw_ops(ch)->free_irq(to_mbus_device(ch), ch->cookie, ch);
}

static int mic_dma_chan_init(struct mic_dma_chan *ch)
{
	int ret = mic_dma_alloc_desc_ring(ch);

	if (ret)
		goto ring_error;
	ret = mic_dma_alloc_status_dest(ch);
	if (ret)
		goto status_error;
	ret = mic_dma_chan_setup(ch);
	if (ret)
		goto chan_error;
	return ret;
chan_error:
	mic_dma_free_status_dest(ch);
status_error:
	mic_dma_free_desc_ring(ch);
ring_error:
	return ret;
}

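/*
 * Drain the channel by submitting a fenced zero-length memcpy and
 * synchronously waiting for it, which guarantees that all previously
 * queued descriptors have completed.
 */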
static int mic_dma_drain_chan(struct mic_dma_chan *ch)
{
	struct dma_async_tx_descriptor *tx;
	int err = 0;
	dma_cookie_t cookie;

	tx = mic_dma_prep_memcpy_lock(&ch->api_ch, 0, 0, 0, DMA_PREP_FENCE);
	if (!tx) {
		err = -ENOMEM;
		goto error;
	}

	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie))
		err = -ENOMEM;
	else
		err = dma_sync_wait(&ch->api_ch, cookie);
	if (err) {
		dev_err(mic_dma_ch_to_device(ch), "%s %d TO chan 0x%x\n",
			__func__, __LINE__, ch->ch_num);
		err = -EIO;
	}
error:
	mic_dma_cleanup(ch);
	return err;
}

static inline void mic_dma_chan_uninit(struct mic_dma_chan *ch)
{
	mic_dma_chan_destroy(ch);
	mic_dma_cleanup(ch);
	mic_dma_free_status_dest(ch);
	mic_dma_free_desc_ring(ch);
}

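/* Initialize the locks and the irq for every channel owned by this device */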
static int mic_dma_init(struct mic_dma_device *mic_dma_dev,
			enum mic_dma_chan_owner owner)
{
	int i, first_chan = mic_dma_dev->start_ch;
	struct mic_dma_chan *ch;
	int ret;

	for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
		ch = &mic_dma_dev->mic_ch[i];
		ch->ch_num = i;
		ch->owner = owner;
		spin_lock_init(&ch->cleanup_lock);
		spin_lock_init(&ch->prep_lock);
		spin_lock_init(&ch->issue_lock);
		ret = mic_dma_setup_irq(ch);
		if (ret)
			goto error;
	}
	return 0;
error:
	/* unwind the irqs of the channels that were already set up */
	for (i = i - 1; i >= first_chan; i--)
		mic_dma_free_irq(&mic_dma_dev->mic_ch[i]);
	return ret;
}

static void mic_dma_uninit(struct mic_dma_device *mic_dma_dev)
{
	int i, first_chan = mic_dma_dev->start_ch;
	struct mic_dma_chan *ch;

	for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
		ch = &mic_dma_dev->mic_ch[i];
		mic_dma_free_irq(ch);
	}
}

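/*
 * The dmaengine core expects the number of allocated descriptors;
 * report the fixed ring size.
 */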
static int mic_dma_alloc_chan_resources(struct dma_chan *ch)
{
	int ret = mic_dma_chan_init(to_mic_dma_chan(ch));

	if (ret)
		return ret;
	return MIC_DMA_DESC_RX_SIZE;
}

static void mic_dma_free_chan_resources(struct dma_chan *ch)
{
	struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);

	mic_dma_drain_chan(mic_ch);
	mic_dma_chan_uninit(mic_ch);
}

/* Set the fn. handlers and register the dma device with dma api */
static int mic_dma_register_dma_device(struct mic_dma_device *mic_dma_dev,
				       enum mic_dma_chan_owner owner)
{
	int i, first_chan = mic_dma_dev->start_ch;

	dma_cap_zero(mic_dma_dev->dma_dev.cap_mask);
	/*
	 * This dma engine is not capable of host memory to host memory
	 * transfers
	 */
	dma_cap_set(DMA_MEMCPY, mic_dma_dev->dma_dev.cap_mask);

	if (MIC_DMA_CHAN_HOST == owner)
		dma_cap_set(DMA_PRIVATE, mic_dma_dev->dma_dev.cap_mask);
	mic_dma_dev->dma_dev.device_alloc_chan_resources =
		mic_dma_alloc_chan_resources;
	mic_dma_dev->dma_dev.device_free_chan_resources =
		mic_dma_free_chan_resources;
	mic_dma_dev->dma_dev.device_tx_status = mic_dma_tx_status;
	mic_dma_dev->dma_dev.device_prep_dma_memcpy = mic_dma_prep_memcpy_lock;
	mic_dma_dev->dma_dev.device_prep_dma_imm_data =
		mic_dma_prep_status_lock;
	mic_dma_dev->dma_dev.device_prep_dma_interrupt =
		mic_dma_prep_interrupt_lock;
	mic_dma_dev->dma_dev.device_issue_pending = mic_dma_issue_pending;
	mic_dma_dev->dma_dev.copy_align = MIC_DMA_ALIGN_SHIFT;
	INIT_LIST_HEAD(&mic_dma_dev->dma_dev.channels);
	for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
		mic_dma_dev->mic_ch[i].api_ch.device = &mic_dma_dev->dma_dev;
		dma_cookie_init(&mic_dma_dev->mic_ch[i].api_ch);
		list_add_tail(&mic_dma_dev->mic_ch[i].api_ch.device_node,
			      &mic_dma_dev->dma_dev.channels);
	}
	return dma_async_device_register(&mic_dma_dev->dma_dev);
}

/*
 * Initializes dma channels and registers the dma device with the
 * dma engine api.
 */
static struct mic_dma_device *mic_dma_dev_reg(struct mbus_device *mbdev,
					      enum mic_dma_chan_owner owner)
{
	struct mic_dma_device *mic_dma_dev;
	int ret;
	struct device *dev = &mbdev->dev;

	mic_dma_dev = kzalloc(sizeof(*mic_dma_dev), GFP_KERNEL);
	if (!mic_dma_dev) {
		ret = -ENOMEM;
		goto alloc_error;
	}
	mic_dma_dev->mbdev = mbdev;
	mic_dma_dev->dma_dev.dev = dev;
	mic_dma_dev->mmio = mbdev->mmio_va;
	if (MIC_DMA_CHAN_HOST == owner) {
		mic_dma_dev->start_ch = 0;
		mic_dma_dev->max_xfer_size = MIC_DMA_MAX_XFER_SIZE_HOST;
	} else {
		mic_dma_dev->start_ch = 4;
		mic_dma_dev->max_xfer_size = MIC_DMA_MAX_XFER_SIZE_CARD;
	}
	ret = mic_dma_init(mic_dma_dev, owner);
	if (ret)
		goto init_error;
	ret = mic_dma_register_dma_device(mic_dma_dev, owner);
	if (ret)
		goto reg_error;
	return mic_dma_dev;
reg_error:
	mic_dma_uninit(mic_dma_dev);
init_error:
	kfree(mic_dma_dev);
	mic_dma_dev = NULL;
alloc_error:
	dev_err(dev, "Error at %s %d ret=%d\n", __func__, __LINE__, ret);
	return mic_dma_dev;
}

static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev)
{
	mic_dma_unregister_dma_device(mic_dma_dev);
	mic_dma_uninit(mic_dma_dev);
	kfree(mic_dma_dev);
}

/* DEBUGFS CODE */
static int mic_dma_reg_seq_show(struct seq_file *s, void *pos)
{
	struct mic_dma_device *mic_dma_dev = s->private;
	int i, chan_num, first_chan = mic_dma_dev->start_ch;
	struct mic_dma_chan *ch;

	seq_printf(s, "SBOX_DCR: %#x\n",
		   mic_dma_mmio_read(&mic_dma_dev->mic_ch[first_chan],
				     MIC_DMA_SBOX_BASE + MIC_DMA_SBOX_DCR));
	seq_puts(s, "DMA Channel Registers\n");
	seq_printf(s, "%-10s| %-10s %-10s %-10s %-10s %-10s",
		   "Channel", "DCAR", "DTPR", "DHPR", "DRAR_HI", "DRAR_LO");
	seq_printf(s, " %-11s %-14s %-10s\n", "DCHERR", "DCHERRMSK", "DSTAT");
	for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
		ch = &mic_dma_dev->mic_ch[i];
		chan_num = ch->ch_num;
		seq_printf(s, "%-10i| %-#10x %-#10x %-#10x %-#10x",
			   chan_num,
			   mic_dma_read_reg(ch, MIC_DMA_REG_DCAR),
			   mic_dma_read_reg(ch, MIC_DMA_REG_DTPR),
			   mic_dma_read_reg(ch, MIC_DMA_REG_DHPR),
			   mic_dma_read_reg(ch, MIC_DMA_REG_DRAR_HI));
		seq_printf(s, " %-#10x %-#10x %-#14x %-#10x\n",
			   mic_dma_read_reg(ch, MIC_DMA_REG_DRAR_LO),
			   mic_dma_read_reg(ch, MIC_DMA_REG_DCHERR),
			   mic_dma_read_reg(ch, MIC_DMA_REG_DCHERRMSK),
			   mic_dma_read_reg(ch, MIC_DMA_REG_DSTAT));
	}
	return 0;
}

static int mic_dma_reg_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, mic_dma_reg_seq_show, inode->i_private);
}

static int mic_dma_reg_debug_release(struct inode *inode, struct file *file)
{
	return single_release(inode, file);
}

static const struct file_operations mic_dma_reg_ops = {
	.owner   = THIS_MODULE,
	.open    = mic_dma_reg_debug_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = mic_dma_reg_debug_release
};

/* Debugfs parent dir */
static struct dentry *mic_dma_dbg;

static int mic_dma_driver_probe(struct mbus_device *mbdev)
{
	struct mic_dma_device *mic_dma_dev;
	enum mic_dma_chan_owner owner;

	if (MBUS_DEV_DMA_MIC == mbdev->id.device)
		owner = MIC_DMA_CHAN_MIC;
	else
		owner = MIC_DMA_CHAN_HOST;

	mic_dma_dev = mic_dma_dev_reg(mbdev, owner);
	if (!mic_dma_dev)
		return -ENODEV;
	dev_set_drvdata(&mbdev->dev, mic_dma_dev);

	if (mic_dma_dbg) {
		mic_dma_dev->dbg_dir = debugfs_create_dir(dev_name(&mbdev->dev),
							  mic_dma_dbg);
		if (mic_dma_dev->dbg_dir)
			debugfs_create_file("mic_dma_reg", 0444,
					    mic_dma_dev->dbg_dir, mic_dma_dev,
					    &mic_dma_reg_ops);
	}
	return 0;
}

static void mic_dma_driver_remove(struct mbus_device *mbdev)
{
	struct mic_dma_device *mic_dma_dev;

	mic_dma_dev = dev_get_drvdata(&mbdev->dev);
	debugfs_remove_recursive(mic_dma_dev->dbg_dir);
	mic_dma_dev_unreg(mic_dma_dev);
}

static struct mbus_device_id id_table[] = {
	{MBUS_DEV_DMA_MIC, MBUS_DEV_ANY_ID},
	{MBUS_DEV_DMA_HOST, MBUS_DEV_ANY_ID},
	{0},
};

static struct mbus_driver mic_dma_driver = {
	.driver.name	= KBUILD_MODNAME,
	.driver.owner	= THIS_MODULE,
	.id_table	= id_table,
	.probe		= mic_dma_driver_probe,
	.remove		= mic_dma_driver_remove,
};

static int __init mic_x100_dma_init(void)
{
	int rc = mbus_register_driver(&mic_dma_driver);

	if (rc)
		return rc;
	mic_dma_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
	return 0;
}

static void __exit mic_x100_dma_exit(void)
{
	debugfs_remove_recursive(mic_dma_dbg);
	mbus_unregister_driver(&mic_dma_driver);
}

module_init(mic_x100_dma_init);
module_exit(mic_x100_dma_exit);

MODULE_DEVICE_TABLE(mbus, id_table);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) MIC X100 DMA Driver");
MODULE_LICENSE("GPL v2");