/*
 * intel_mid_dma.c - Intel Langwell DMA Drivers
 *
 * Copyright (C) 2008-10 Intel Corp
 * Author: Vinod Koul <vinod.koul@intel.com>
 * The driver design is based on dw_dmac driver
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *
 */
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/intel_mid_dma.h>
#include <linux/module.h>

#include "dmaengine.h"

#define MAX_CHAN        4 /* max ch across controllers */
#include "intel_mid_dma_regs.h"

#define INTEL_MID_DMAC1_ID              0x0814
#define INTEL_MID_DMAC2_ID              0x0813
#define INTEL_MID_GP_DMAC2_ID           0x0827
#define INTEL_MFLD_DMAC1_ID             0x0830
#define LNW_PERIPHRAL_MASK_BASE         0xFFAE8008
#define LNW_PERIPHRAL_MASK_SIZE         0x10
#define LNW_PERIPHRAL_STATUS            0x0
#define LNW_PERIPHRAL_MASK              0x8

struct intel_mid_dma_probe_info {
        u8 max_chan;
        u8 ch_base;
        u16 block_size;
        u32 pimr_mask;
};

#define INFO(_max_chan, _ch_base, _block_size, _pimr_mask) \
        ((kernel_ulong_t)&(struct intel_mid_dma_probe_info) {  \
                .max_chan = (_max_chan),                        \
                .ch_base = (_ch_base),                          \
                .block_size = (_block_size),                    \
                .pimr_mask = (_pimr_mask),                      \
        })
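
/*
 * Example: the probe path below recovers this structure from the matched
 * pci_device_id with
 *
 *      struct intel_mid_dma_probe_info *info = (void *)id->driver_data;
 *
 * so the DMAC1 entry INFO(2, 6, 4095, 0x200020) in intel_mid_dma_ids
 * yields max_chan = 2, ch_base = 6, block_size = 4095 and
 * pimr_mask = 0x200020.
 */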

/*****************************************************************************
Utility Functions*/
/**
 * get_ch_index - convert status to channel
 * @status: status mask
 * @base: dma ch base value
 *
 * Modify the status mask and return the channel index needing
 * attention (or -1 if none)
 */
static int get_ch_index(int *status, unsigned int base)
{
        int i;
        for (i = 0; i < MAX_CHAN; i++) {
                if (*status & (1 << (i + base))) {
                        *status = *status & ~(1 << (i + base));
                        pr_debug("MDMA: index %d New status %x\n", i, *status);
                        return i;
                }
        }
        return -1;
}
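
/*
 * Worked example: with base = 6 (the DMAC1 channel base used in the PCI
 * table below) and *status = 0x40 (bit 6 set), the first call clears
 * bit 6 and returns index 0; a further call on the now-zero status
 * returns -1.
 */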

/**
 * get_block_ts - calculates dma transaction length
 * @len: dma transfer length
 * @tx_width: dma transfer src width
 * @block_size: dma controller max block size
 *
 * Based on src width, calculate the DMA transaction length in data items;
 * return the data items, or 0xFFFF if it exceeds the max length for a block
 */
static int get_block_ts(int len, int tx_width, int block_size)
{
        int byte_width = 0, block_ts = 0;

        switch (tx_width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                byte_width = 1;
                break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                byte_width = 2;
                break;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
        default:
                byte_width = 4;
                break;
        }

        block_ts = len/byte_width;
        if (block_ts > block_size)
                block_ts = 0xFFFF;
        return block_ts;
}
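
/*
 * Worked example: a 100-byte transfer at DMA_SLAVE_BUSWIDTH_4_BYTES gives
 * block_ts = 100 / 4 = 25 data items; only if that exceeded the
 * controller's block_size would it be clamped to 0xFFFF.
 */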

/*****************************************************************************
DMAC1 interrupt Functions*/

/**
 * dmac1_mask_periphral_intr - mask the peripheral interrupt
 * @mid: dma device for which masking is required
 *
 * Masks the DMA peripheral interrupt;
 * this is valid for DMAC1 family controllers only.
 * This controller should have the peripheral mask registers already mapped.
 */
static void dmac1_mask_periphral_intr(struct middma_device *mid)
{
        u32 pimr;

        if (mid->pimr_mask) {
                pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
                pimr |= mid->pimr_mask;
                writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
        }
        return;
}
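
/*
 * For example, with the Medfield DMAC1 pimr_mask of 0x400040 (see the
 * PCI table below), masking ORs bits 6 and 22 into the PIMR; the
 * Moorestown value 0x200020 covers bits 5 and 21.
 */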

/**
 * dmac1_unmask_periphral_intr - unmask the peripheral interrupt
 * @midc: dma channel for which unmasking is required
 *
 * Unmasks the DMA peripheral interrupt;
 * this is valid for DMAC1 family controllers only.
 * This controller should have the peripheral mask registers already mapped.
 */
static void dmac1_unmask_periphral_intr(struct intel_mid_dma_chan *midc)
{
        u32 pimr;
        struct middma_device *mid = to_middma_device(midc->chan.device);

        if (mid->pimr_mask) {
                pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
                pimr &= ~mid->pimr_mask;
                writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK);
        }
        return;
}

/**
 * enable_dma_interrupt - enable the peripheral interrupt
 * @midc: dma channel for which enabling the interrupt is required
 *
 * Enables the DMA peripheral interrupt;
 * this is valid for DMAC1 family controllers only.
 * This controller should have the peripheral mask registers already mapped.
 */
static void enable_dma_interrupt(struct intel_mid_dma_chan *midc)
{
        dmac1_unmask_periphral_intr(midc);

        /* enable channel interrupts */
        iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
        iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
        return;
}

/**
 * disable_dma_interrupt - disable the peripheral interrupt
 * @midc: dma channel for which disabling the interrupt is required
 *
 * Disables the DMA peripheral interrupt;
 * this is valid for DMAC1 family controllers only.
 * This controller should have the peripheral mask registers already mapped.
 */
static void disable_dma_interrupt(struct intel_mid_dma_chan *midc)
{
        /* Check LPE PISR, make sure fwd is disabled */
        iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK);
        iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
        iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
        return;
}

/*****************************************************************************
DMA channel helper Functions*/
/**
 * midc_desc_get - get a descriptor
 * @midc: dma channel for which descriptor is required
 *
 * Obtain a descriptor for the channel. Returns NULL if none are free.
 * Once the descriptor is returned it is private until put on another
 * list or freed
 */
static struct intel_mid_dma_desc *midc_desc_get(struct intel_mid_dma_chan *midc)
{
        struct intel_mid_dma_desc *desc, *_desc;
        struct intel_mid_dma_desc *ret = NULL;

        spin_lock_bh(&midc->lock);
        list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
                if (async_tx_test_ack(&desc->txd)) {
                        list_del(&desc->desc_node);
                        ret = desc;
                        break;
                }
        }
        spin_unlock_bh(&midc->lock);
        return ret;
}

/**
 * midc_desc_put - put a descriptor
 * @midc: dma channel for which descriptor is required
 * @desc: descriptor to put
 *
 * Return a descriptor from midc_desc_get back to the free pool
 */
static void midc_desc_put(struct intel_mid_dma_chan *midc,
                        struct intel_mid_dma_desc *desc)
{
        if (desc) {
                spin_lock_bh(&midc->lock);
                list_add_tail(&desc->desc_node, &midc->free_list);
                spin_unlock_bh(&midc->lock);
        }
}
/**
 * midc_dostart - begin a DMA transaction
 * @midc: channel for which txn is to be started
 * @first: first descriptor of series
 *
 * Load a transaction into the engine. This must be called with midc->lock
 * held and bh disabled.
 */
static void midc_dostart(struct intel_mid_dma_chan *midc,
                        struct intel_mid_dma_desc *first)
{
        struct middma_device *mid = to_middma_device(midc->chan.device);

        /* channel should be idle before we start it */
        if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) {
                /* error */
                pr_err("ERR_MDMA: channel is busy in start\n");
                /* The tasklet will hopefully advance the queue... */
                return;
        }
        midc->busy = true;
        /* write registers and enable */
        iowrite32(first->sar, midc->ch_regs + SAR);
        iowrite32(first->dar, midc->ch_regs + DAR);
        iowrite32(first->lli_phys, midc->ch_regs + LLP);
        iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH);
        iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW);
        iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW);
        iowrite32(first->ctl_hi, midc->ch_regs + CTL_HIGH);
        pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n",
                (int)first->sar, (int)first->dar, first->cfg_hi,
                first->cfg_lo, first->ctl_hi, first->ctl_lo);
        first->status = DMA_IN_PROGRESS;

        iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
}

/**
 * midc_descriptor_complete - process completed descriptor
 * @midc: channel owning the descriptor
 * @desc: the descriptor itself
 *
 * Process a completed descriptor and perform any callbacks upon
 * the completion. The completion handling drops the lock during the
 * callbacks but must be called with the lock held.
 */
static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
                struct intel_mid_dma_desc *desc)
                __releases(&midc->lock) __acquires(&midc->lock)
{
        struct dma_async_tx_descriptor *txd = &desc->txd;
        dma_async_tx_callback callback_txd = NULL;
        struct intel_mid_dma_lli *llitem;
        void *param_txd = NULL;

        dma_cookie_complete(txd);
        callback_txd = txd->callback;
        param_txd = txd->callback_param;

        if (desc->lli != NULL) {
                /* clear the DONE bit of the completed LLI in memory */
                llitem = desc->lli + desc->current_lli;
                llitem->ctl_hi &= CLEAR_DONE;
                if (desc->current_lli < desc->lli_length - 1)
                        (desc->current_lli)++;
                else
                        desc->current_lli = 0;
        }
        spin_unlock_bh(&midc->lock);
        if (callback_txd) {
                pr_debug("MDMA: TXD callback set ... calling\n");
                callback_txd(param_txd);
        }
        if (midc->raw_tfr) {
                desc->status = DMA_SUCCESS;
                if (desc->lli != NULL) {
                        pci_pool_free(desc->lli_pool, desc->lli,
                                                desc->lli_phys);
                        pci_pool_destroy(desc->lli_pool);
                        desc->lli = NULL;
                }
                list_move(&desc->desc_node, &midc->free_list);
                midc->busy = false;
        }
        spin_lock_bh(&midc->lock);

}
/**
 * midc_scan_descriptors - check the descriptors in channel
 *                              mark completed when tx is complete
 * @mid: device
 * @midc: channel to scan
 *
 * Walk the descriptor chain for the device and process any entries
 * that are complete.
 */
static void midc_scan_descriptors(struct middma_device *mid,
                                struct intel_mid_dma_chan *midc)
{
        struct intel_mid_dma_desc *desc = NULL, *_desc = NULL;

        /* tx is complete */
        list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
                if (desc->status == DMA_IN_PROGRESS)
                        midc_descriptor_complete(midc, desc);
        }
        return;
}
/**
 * midc_lli_fill_sg - Helper function to convert
 *                      an SG list to Linked List Items.
 * @midc: Channel
 * @desc: DMA descriptor
 * @sglist: Pointer to SG list
 * @sglen: SG list length
 * @flags: DMA transaction flags
 *
 * Walk through the SG list and convert the SG list into Linked
 * List Items (LLI).
 */
static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
                                struct intel_mid_dma_desc *desc,
                                struct scatterlist *sglist,
                                unsigned int sglen,
                                unsigned int flags)
{
        struct intel_mid_dma_slave *mids;
        struct scatterlist *sg;
        dma_addr_t lli_next, sg_phy_addr;
        struct intel_mid_dma_lli *lli_bloc_desc;
        union intel_mid_dma_ctl_lo ctl_lo;
        union intel_mid_dma_ctl_hi ctl_hi;
        int i;

        pr_debug("MDMA: Entered midc_lli_fill_sg\n");
        mids = midc->mid_slave;

        lli_bloc_desc = desc->lli;
        lli_next = desc->lli_phys;

        ctl_lo.ctl_lo = desc->ctl_lo;
        ctl_hi.ctl_hi = desc->ctl_hi;
        for_each_sg(sglist, sg, sglen, i) {
                /* Populate CTL_LOW and LLI values */
                if (i != sglen - 1) {
                        lli_next = lli_next +
                                sizeof(struct intel_mid_dma_lli);
                } else {
                        /* Check for circular list; otherwise terminate LLI to ZERO */
                        if (flags & DMA_PREP_CIRCULAR_LIST) {
                                pr_debug("MDMA: LLI is configured in circular mode\n");
                                lli_next = desc->lli_phys;
                        } else {
                                lli_next = 0;
                                ctl_lo.ctlx.llp_dst_en = 0;
                                ctl_lo.ctlx.llp_src_en = 0;
                        }
                }
                /* Populate CTL_HI values */
                ctl_hi.ctlx.block_ts = get_block_ts(sg->length,
                                                        desc->width,
                                                        midc->dma->block_size);
                /* Populate SAR and DAR values */
                sg_phy_addr = sg_phys(sg);
                if (desc->dirn == DMA_MEM_TO_DEV) {
                        lli_bloc_desc->sar = sg_phy_addr;
                        lli_bloc_desc->dar = mids->dma_slave.dst_addr;
                } else if (desc->dirn == DMA_DEV_TO_MEM) {
                        lli_bloc_desc->sar = mids->dma_slave.src_addr;
                        lli_bloc_desc->dar = sg_phy_addr;
                }
                /* Copy values into block descriptor in system memory */
                lli_bloc_desc->llp = lli_next;
                lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo;
                lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi;

                lli_bloc_desc++;
        }
        /* Copy very first LLI values to descriptor */
        desc->ctl_lo = desc->lli->ctl_lo;
        desc->ctl_hi = desc->lli->ctl_hi;
        desc->sar = desc->lli->sar;
        desc->dar = desc->lli->dar;

        return 0;
}
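
/*
 * Illustrative LLI layout for a 3-entry SG list (non-circular case),
 * as built by midc_lli_fill_sg() above:
 *
 *      desc->lli_phys -> lli[0].llp = desc->lli_phys + sizeof(lli)
 *                        lli[1].llp = desc->lli_phys + 2 * sizeof(lli)
 *                        lli[2].llp = 0, llp_src_en = llp_dst_en = 0
 *
 * With DMA_PREP_CIRCULAR_LIST the last llp instead points back to
 * desc->lli_phys, so the hardware loops over the list.
 */
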
/*****************************************************************************
DMA engine callback Functions*/
/**
 * intel_mid_dma_tx_submit - callback to submit DMA transaction
 * @tx: dma engine descriptor
 *
 * Submit the DMA transaction for this descriptor; start if the channel
 * is idle
 */
static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct intel_mid_dma_desc *desc = to_intel_mid_dma_desc(tx);
        struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(tx->chan);
        dma_cookie_t cookie;

        spin_lock_bh(&midc->lock);
        cookie = dma_cookie_assign(tx);

        if (list_empty(&midc->active_list))
                list_add_tail(&desc->desc_node, &midc->active_list);
        else
                list_add_tail(&desc->desc_node, &midc->queue);

        midc_dostart(midc, desc);
        spin_unlock_bh(&midc->lock);

        return cookie;
}
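
/*
 * Consumer-side sketch (hypothetical client code, not part of this
 * driver): a client normally reaches tx_submit through the operations
 * this driver registers in mid_setup_dma() below:
 *
 *      struct dma_async_tx_descriptor *txd;
 *      dma_cookie_t cookie;
 *
 *      txd = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *                                              DMA_PREP_INTERRUPT);
 *      txd->callback = my_done_cb;             (hypothetical callback)
 *      cookie = txd->tx_submit(txd);           (intel_mid_dma_tx_submit)
 *      chan->device->device_issue_pending(chan);
 */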

/**
 * intel_mid_dma_issue_pending - callback to issue pending txn
 * @chan: chan where pending transactions need to be checked and submitted
 *
 * Call for scan to issue pending descriptors
 */
static void intel_mid_dma_issue_pending(struct dma_chan *chan)
{
        struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);

        spin_lock_bh(&midc->lock);
        if (!list_empty(&midc->queue))
                midc_scan_descriptors(to_middma_device(chan->device), midc);
        spin_unlock_bh(&midc->lock);
}

/**
 * intel_mid_dma_tx_status - Return status of txn
 * @chan: chan for which status needs to be checked
 * @cookie: cookie for txn
 * @txstate: DMA txn state
 *
 * Return status of DMA txn
 */
static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
                                                dma_cookie_t cookie,
                                                struct dma_tx_state *txstate)
{
        struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
        enum dma_status ret;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret != DMA_SUCCESS) {
                spin_lock_bh(&midc->lock);
                midc_scan_descriptors(to_middma_device(chan->device), midc);
                spin_unlock_bh(&midc->lock);

                ret = dma_cookie_status(chan, cookie, txstate);
        }

        return ret;
}

static int dma_slave_control(struct dma_chan *chan, unsigned long arg)
{
        struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
        struct dma_slave_config *slave = (struct dma_slave_config *)arg;
        struct intel_mid_dma_slave *mid_slave;

        BUG_ON(!midc);
        BUG_ON(!slave);
        pr_debug("MDMA: slave control called\n");

        mid_slave = to_intel_mid_dma_slave(slave);

        BUG_ON(!mid_slave);

        midc->mid_slave = mid_slave;
        return 0;
}
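
/*
 * Configuration sketch (hypothetical client code, illustrative values):
 * the DMA_SLAVE_CONFIG path above expects the generic dma_slave_config
 * to be embedded in a struct intel_mid_dma_slave, which
 * to_intel_mid_dma_slave() recovers, e.g.:
 *
 *      struct intel_mid_dma_slave mid_cfg;     (client-owned storage)
 *
 *      mid_cfg.dma_slave.direction = DMA_MEM_TO_DEV;
 *      mid_cfg.dma_slave.dst_addr = my_fifo_phys;      (hypothetical)
 *      mid_cfg.dma_slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 *      chan->device->device_control(chan, DMA_SLAVE_CONFIG,
 *                              (unsigned long)&mid_cfg.dma_slave);
 */
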
/**
 * intel_mid_dma_device_control - DMA device control
 * @chan: chan for DMA control
 * @cmd: control cmd
 * @arg: cmd arg value
 *
 * Perform DMA control command
 */
static int intel_mid_dma_device_control(struct dma_chan *chan,
                        enum dma_ctrl_cmd cmd, unsigned long arg)
{
        struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
        struct middma_device *mid = to_middma_device(chan->device);
        struct intel_mid_dma_desc *desc, *_desc;
        union intel_mid_dma_cfg_lo cfg_lo;

        if (cmd == DMA_SLAVE_CONFIG)
                return dma_slave_control(chan, arg);

        if (cmd != DMA_TERMINATE_ALL)
                return -ENXIO;

        spin_lock_bh(&midc->lock);
        if (midc->busy == false) {
                spin_unlock_bh(&midc->lock);
                return 0;
        }
        /* Suspend and disable the channel */
        cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW);
        cfg_lo.cfgx.ch_susp = 1;
        iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW);
        iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN);
        midc->busy = false;
        /* Disable interrupts */
        disable_dma_interrupt(midc);
        midc->descs_allocated = 0;

        spin_unlock_bh(&midc->lock);
        list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
                if (desc->lli != NULL) {
                        pci_pool_free(desc->lli_pool, desc->lli,
                                                desc->lli_phys);
                        pci_pool_destroy(desc->lli_pool);
                        desc->lli = NULL;
                }
                list_move(&desc->desc_node, &midc->free_list);
        }
        return 0;
}


/**
 * intel_mid_dma_prep_memcpy - Prep memcpy txn
 * @chan: chan for DMA transfer
 * @dest: destination address
 * @src: src address
 * @len: DMA transfer len
 * @flags: DMA flags
 *
 * Perform a DMA memcpy. Note we support slave peripheral DMA transfers only;
 * the peripheral txn details should be filled in the slave structure properly.
 * Returns the descriptor for this txn
 */
static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
                        struct dma_chan *chan, dma_addr_t dest,
                        dma_addr_t src, size_t len, unsigned long flags)
{
        struct intel_mid_dma_chan *midc;
        struct intel_mid_dma_desc *desc = NULL;
        struct intel_mid_dma_slave *mids;
        union intel_mid_dma_ctl_lo ctl_lo;
        union intel_mid_dma_ctl_hi ctl_hi;
        union intel_mid_dma_cfg_lo cfg_lo;
        union intel_mid_dma_cfg_hi cfg_hi;
        enum dma_slave_buswidth width;

        pr_debug("MDMA: Prep for memcpy\n");
        BUG_ON(!chan);
        if (!len)
                return NULL;

        midc = to_intel_mid_dma_chan(chan);
        BUG_ON(!midc);

        mids = midc->mid_slave;
        BUG_ON(!mids);

        pr_debug("MDMA:called for DMA %x CH %d Length %zu\n",
                                midc->dma->pci_id, midc->ch_id, len);
        pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n",
                        mids->cfg_mode, mids->dma_slave.direction,
                        mids->hs_mode, mids->dma_slave.src_addr_width);

        /* calculate CFG_LO */
        if (mids->hs_mode == LNW_DMA_SW_HS) {
                cfg_lo.cfg_lo = 0;
                cfg_lo.cfgx.hs_sel_dst = 1;
                cfg_lo.cfgx.hs_sel_src = 1;
        } else if (mids->hs_mode == LNW_DMA_HW_HS)
                cfg_lo.cfg_lo = 0x00000;

        /* calculate CFG_HI */
        if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
                /* SW HS only */
                cfg_hi.cfg_hi = 0;
        } else {
                cfg_hi.cfg_hi = 0;
                if (midc->dma->pimr_mask) {
                        cfg_hi.cfgx.protctl = 0x0; /* default value */
                        cfg_hi.cfgx.fifo_mode = 1;
                        if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
                                cfg_hi.cfgx.src_per = 0;
                                if (mids->device_instance == 0)
                                        cfg_hi.cfgx.dst_per = 3;
                                if (mids->device_instance == 1)
                                        cfg_hi.cfgx.dst_per = 1;
                        } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
                                if (mids->device_instance == 0)
                                        cfg_hi.cfgx.src_per = 2;
                                if (mids->device_instance == 1)
                                        cfg_hi.cfgx.src_per = 0;
                                cfg_hi.cfgx.dst_per = 0;
                        }
                } else {
                        cfg_hi.cfgx.protctl = 0x1; /* default value */
                        cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per =
                                        midc->ch_id - midc->dma->chan_base;
                }
        }

        /* calculate CTL_HI */
        ctl_hi.ctlx.reser = 0;
        ctl_hi.ctlx.done = 0;
        width = mids->dma_slave.src_addr_width;

        ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size);
        pr_debug("MDMA:calc len %d for block size %d\n",
                                ctl_hi.ctlx.block_ts, midc->dma->block_size);
        /* calculate CTL_LO */
        ctl_lo.ctl_lo = 0;
        ctl_lo.ctlx.int_en = 1;
        ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst;
        ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst;

        /*
         * Here we need some translation from "enum dma_slave_buswidth"
         * to the format for our dma controller
         *              standard        intel_mid_dmac's format
         *               1 Byte                 0b000
         *               2 Bytes                0b001
         *               4 Bytes                0b010
         */
        ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2;
        ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2;
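
        /*
         * Worked check: DMA_SLAVE_BUSWIDTH_1_BYTE (1) / 2 = 0,
         * DMA_SLAVE_BUSWIDTH_2_BYTES (2) / 2 = 1, and
         * DMA_SLAVE_BUSWIDTH_4_BYTES (4) / 2 = 2, matching the
         * encoding table above.
         */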

        if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
                ctl_lo.ctlx.tt_fc = 0;
                ctl_lo.ctlx.sinc = 0;
                ctl_lo.ctlx.dinc = 0;
        } else {
                if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
                        ctl_lo.ctlx.sinc = 0;
                        ctl_lo.ctlx.dinc = 2;
                        ctl_lo.ctlx.tt_fc = 1;
                } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
                        ctl_lo.ctlx.sinc = 2;
                        ctl_lo.ctlx.dinc = 0;
                        ctl_lo.ctlx.tt_fc = 2;
                }
        }

        pr_debug("MDMA:Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n",
                ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi);

        enable_dma_interrupt(midc);

        desc = midc_desc_get(midc);
        if (desc == NULL)
                goto err_desc_get;
        desc->sar = src;
        desc->dar = dest;
        desc->len = len;
        desc->cfg_hi = cfg_hi.cfg_hi;
        desc->cfg_lo = cfg_lo.cfg_lo;
        desc->ctl_lo = ctl_lo.ctl_lo;
        desc->ctl_hi = ctl_hi.ctl_hi;
        desc->width = width;
        desc->dirn = mids->dma_slave.direction;
        desc->lli_phys = 0;
        desc->lli = NULL;
        desc->lli_pool = NULL;
        return &desc->txd;

err_desc_get:
        pr_err("ERR_MDMA: Failed to get desc\n");
        midc_desc_put(midc, desc);
        return NULL;
}
/**
 * intel_mid_dma_prep_slave_sg - Prep slave sg txn
 * @chan: chan for DMA transfer
 * @sgl: scatter gather list
 * @sg_len: length of sg txn
 * @direction: DMA transfer direction
 * @flags: DMA flags
 *
 * Prepares an LLI-based peripheral transfer
 */
static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
                        struct dma_chan *chan, struct scatterlist *sgl,
                        unsigned int sg_len, enum dma_transfer_direction direction,
                        unsigned long flags)
{
        struct intel_mid_dma_chan *midc = NULL;
        struct intel_mid_dma_slave *mids = NULL;
        struct intel_mid_dma_desc *desc = NULL;
        struct dma_async_tx_descriptor *txd = NULL;
        union intel_mid_dma_ctl_lo ctl_lo;

        pr_debug("MDMA: Prep for slave SG\n");

        if (!sg_len) {
                pr_err("MDMA: Invalid SG length\n");
                return NULL;
        }
        midc = to_intel_mid_dma_chan(chan);
        BUG_ON(!midc);

        mids = midc->mid_slave;
        BUG_ON(!mids);

        if (!midc->dma->pimr_mask) {
                /* We can still handle an sg list with only one item */
                if (sg_len == 1) {
                        txd = intel_mid_dma_prep_memcpy(chan,
                                                mids->dma_slave.dst_addr,
                                                mids->dma_slave.src_addr,
                                                sgl->length,
                                                flags);
                        return txd;
                } else {
                        pr_warn("MDMA: SG list is not supported by this controller\n");
                        return NULL;
                }
        }

        pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
                        sg_len, direction, flags);

        txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags);
        if (NULL == txd) {
                pr_err("MDMA: Prep memcpy failed\n");
                return NULL;
        }

        desc = to_intel_mid_dma_desc(txd);
        desc->dirn = direction;
        ctl_lo.ctl_lo = desc->ctl_lo;
        ctl_lo.ctlx.llp_dst_en = 1;
        ctl_lo.ctlx.llp_src_en = 1;
        desc->ctl_lo = ctl_lo.ctl_lo;
        desc->lli_length = sg_len;
        desc->current_lli = 0;
        /* DMA coherent memory pool for LLI descriptors */
        desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool",
                                        midc->dma->pdev,
                                        (sizeof(struct intel_mid_dma_lli)*sg_len),
                                        32, 0);
        if (NULL == desc->lli_pool) {
                pr_err("MID_DMA:LLI pool create failed\n");
                return NULL;
        }

        desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys);
        if (!desc->lli) {
                pr_err("MID_DMA: LLI alloc failed\n");
                pci_pool_destroy(desc->lli_pool);
                return NULL;
        }

        midc_lli_fill_sg(midc, desc, sgl, sg_len, flags);
        if (flags & DMA_PREP_INTERRUPT) {
                iowrite32(UNMASK_INTR_REG(midc->ch_id),
                                midc->dma_base + MASK_BLOCK);
                pr_debug("MDMA:Enabled Block interrupt\n");
        }
        return &desc->txd;
}

/**
 * intel_mid_dma_free_chan_resources - Frees dma resources
 * @chan: chan requiring attention
 *
 * Frees the allocated resources on this DMA chan
 */
static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
{
        struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
        struct middma_device *mid = to_middma_device(chan->device);
        struct intel_mid_dma_desc *desc, *_desc;

        if (true == midc->busy) {
                /* trying to free a channel in use! */
                pr_err("ERR_MDMA: trying to free ch in use\n");
        }
        spin_lock_bh(&midc->lock);
        midc->descs_allocated = 0;
        list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
                list_del(&desc->desc_node);
                pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
        }
        list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) {
                list_del(&desc->desc_node);
                pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
        }
        list_for_each_entry_safe(desc, _desc, &midc->queue, desc_node) {
                list_del(&desc->desc_node);
                pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
        }
        spin_unlock_bh(&midc->lock);
        midc->in_use = false;
        midc->busy = false;
        /* Disable CH interrupts */
        iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK);
        iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR);
        pm_runtime_put(&mid->pdev->dev);
}

/**
 * intel_mid_dma_alloc_chan_resources - Allocate dma resources
 * @chan: chan requiring attention
 *
 * Allocates DMA resources on this chan
 * Returns the number of descriptors allocated
 */
static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
        struct middma_device *mid = to_middma_device(chan->device);
        struct intel_mid_dma_desc *desc;
        dma_addr_t phys;
        int i = 0;

        pm_runtime_get_sync(&mid->pdev->dev);

        if (mid->state == SUSPENDED) {
                if (dma_resume(&mid->pdev->dev)) {
                        pr_err("ERR_MDMA: resume failed");
                        return -EFAULT;
                }
        }

        /* ASSERT: channel is idle */
        if (test_ch_en(mid->dma_base, midc->ch_id)) {
                /* ch is not idle */
                pr_err("ERR_MDMA: ch not idle\n");
                pm_runtime_put(&mid->pdev->dev);
                return -EIO;
        }
        dma_cookie_init(chan);

        spin_lock_bh(&midc->lock);
        while (midc->descs_allocated < DESCS_PER_CHANNEL) {
                spin_unlock_bh(&midc->lock);
                desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys);
                if (!desc) {
                        pr_err("ERR_MDMA: desc failed\n");
                        pm_runtime_put(&mid->pdev->dev);
                        return -ENOMEM;
                        /*check*/
                }
                dma_async_tx_descriptor_init(&desc->txd, chan);
                desc->txd.tx_submit = intel_mid_dma_tx_submit;
                desc->txd.flags = DMA_CTRL_ACK;
                desc->txd.phys = phys;
                spin_lock_bh(&midc->lock);
                i = ++midc->descs_allocated;
                list_add_tail(&desc->desc_node, &midc->free_list);
        }
        spin_unlock_bh(&midc->lock);
        midc->in_use = true;
        midc->busy = false;
        pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i);
        return i;
}

/**
 * midc_handle_error - Handle DMA txn error
 * @mid: controller where error occurred
 * @midc: chan where error occurred
 *
 * Scan the descriptor for error
 */
static void midc_handle_error(struct middma_device *mid,
                struct intel_mid_dma_chan *midc)
{
        midc_scan_descriptors(mid, midc);
}

/**
 * dma_tasklet - DMA interrupt tasklet
 * @data: tasklet arg (the controller structure)
 *
 * Scan the controller for interrupts for completion/error
 * Clear the interrupt and call for handling completion/error
 */
static void dma_tasklet(unsigned long data)
{
        struct middma_device *mid = NULL;
        struct intel_mid_dma_chan *midc = NULL;
        u32 status, raw_tfr, raw_block;
        int i;

        mid = (struct middma_device *)data;
        if (mid == NULL) {
                pr_err("ERR_MDMA: tasklet Null param\n");
                return;
        }
        pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id);
        raw_tfr = ioread32(mid->dma_base + RAW_TFR);
        raw_block = ioread32(mid->dma_base + RAW_BLOCK);
        status = raw_tfr | raw_block;
        status &= mid->intr_mask;
        while (status) {
                /* txn interrupt */
                i = get_ch_index(&status, mid->chan_base);
                if (i < 0) {
                        pr_err("ERR_MDMA:Invalid ch index %x\n", i);
                        return;
                }
                midc = &mid->ch[i];
                if (midc == NULL) {
                        pr_err("ERR_MDMA:Null param midc\n");
                        return;
                }
                pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
                                status, midc->ch_id, i);
                midc->raw_tfr = raw_tfr;
                midc->raw_block = raw_block;
                spin_lock_bh(&midc->lock);
                /* clear this interrupt first */
                iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR);
                if (raw_block) {
                        iowrite32((1 << midc->ch_id),
                                mid->dma_base + CLEAR_BLOCK);
                }
                midc_scan_descriptors(mid, midc);
                pr_debug("MDMA:Scan of desc... complete, unmasking\n");
                iowrite32(UNMASK_INTR_REG(midc->ch_id),
                                mid->dma_base + MASK_TFR);
                if (raw_block) {
                        iowrite32(UNMASK_INTR_REG(midc->ch_id),
                                        mid->dma_base + MASK_BLOCK);
                }
                spin_unlock_bh(&midc->lock);
        }

        status = ioread32(mid->dma_base + RAW_ERR);
        status &= mid->intr_mask;
        while (status) {
                /* err interrupt */
                i = get_ch_index(&status, mid->chan_base);
                if (i < 0) {
                        pr_err("ERR_MDMA:Invalid ch index %x\n", i);
                        return;
                }
                midc = &mid->ch[i];
                if (midc == NULL) {
                        pr_err("ERR_MDMA:Null param midc\n");
                        return;
                }
                pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n",
                                status, midc->ch_id, i);

                iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_ERR);
                spin_lock_bh(&midc->lock);
                midc_handle_error(mid, midc);
                iowrite32(UNMASK_INTR_REG(midc->ch_id),
                                mid->dma_base + MASK_ERR);
                spin_unlock_bh(&midc->lock);
        }
        pr_debug("MDMA:Exiting tasklet...\n");
        return;
}

static void dma_tasklet1(unsigned long data)
{
        pr_debug("MDMA:in tasklet1...\n");
        return dma_tasklet(data);
}

static void dma_tasklet2(unsigned long data)
{
        pr_debug("MDMA:in tasklet2...\n");
        return dma_tasklet(data);
}

/**
 * intel_mid_dma_interrupt - DMA ISR
 * @irq: IRQ where interrupt occurred
 * @data: ISR callback data (the controller structure)
 *
 * See if this is our interrupt; if so, schedule the tasklet,
 * otherwise ignore
 */
static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
{
        struct middma_device *mid = data;
        u32 tfr_status, err_status;
        int call_tasklet = 0;

        tfr_status = ioread32(mid->dma_base + RAW_TFR);
        err_status = ioread32(mid->dma_base + RAW_ERR);
        if (!tfr_status && !err_status)
                return IRQ_NONE;

        /* DMA Interrupt */
        pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
        pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask);
        tfr_status &= mid->intr_mask;
        if (tfr_status) {
                /* need to disable intr */
                iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR);
                iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK);
                pr_debug("MDMA: Calling tasklet %x\n", tfr_status);
                call_tasklet = 1;
        }
        err_status &= mid->intr_mask;
        if (err_status) {
                iowrite32((err_status << INT_MASK_WE),
                          mid->dma_base + MASK_ERR);
                call_tasklet = 1;
        }
        if (call_tasklet)
                tasklet_schedule(&mid->tasklet);

        return IRQ_HANDLED;
}

static irqreturn_t intel_mid_dma_interrupt1(int irq, void *data)
{
        return intel_mid_dma_interrupt(irq, data);
}

static irqreturn_t intel_mid_dma_interrupt2(int irq, void *data)
{
        return intel_mid_dma_interrupt(irq, data);
}

/**
 * mid_setup_dma - Setup the DMA controller
 * @pdev: Controller PCI device structure
 *
 * Initialize the DMA controller, channels, registers with DMA engine,
 * ISR. Initialize DMA controller channels.
 */
static int mid_setup_dma(struct pci_dev *pdev)
{
        struct middma_device *dma = pci_get_drvdata(pdev);
        int err, i;

        /* DMA coherent memory pool for DMA descriptor allocations */
        dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev,
                                        sizeof(struct intel_mid_dma_desc),
                                        32, 0);
        if (NULL == dma->dma_pool) {
                pr_err("ERR_MDMA:pci_pool_create failed\n");
                err = -ENOMEM;
                goto err_dma_pool;
        }

        INIT_LIST_HEAD(&dma->common.channels);
        dma->pci_id = pdev->device;
        if (dma->pimr_mask) {
                dma->mask_reg = ioremap(LNW_PERIPHRAL_MASK_BASE,
                                        LNW_PERIPHRAL_MASK_SIZE);
                if (dma->mask_reg == NULL) {
                        pr_err("ERR_MDMA:Can't map peripheral intr space !!\n");
                        err = -ENOMEM;
                        goto err_ioremap;
                }
        } else
                dma->mask_reg = NULL;

        pr_debug("MDMA:Adding %d channels for this controller\n", dma->max_chan);
        /* init CH structures */
        dma->intr_mask = 0;
        dma->state = RUNNING;
        for (i = 0; i < dma->max_chan; i++) {
                struct intel_mid_dma_chan *midch = &dma->ch[i];

                midch->chan.device = &dma->common;
                dma_cookie_init(&midch->chan);
                midch->ch_id = dma->chan_base + i;
                pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id);

                midch->dma_base = dma->dma_base;
                midch->ch_regs = dma->dma_base + DMA_CH_SIZE * midch->ch_id;
                midch->dma = dma;
                dma->intr_mask |= 1 << (dma->chan_base + i);
                spin_lock_init(&midch->lock);

                INIT_LIST_HEAD(&midch->active_list);
                INIT_LIST_HEAD(&midch->queue);
                INIT_LIST_HEAD(&midch->free_list);
                /* mask interrupts */
                iowrite32(MASK_INTR_REG(midch->ch_id),
                        dma->dma_base + MASK_BLOCK);
                iowrite32(MASK_INTR_REG(midch->ch_id),
                        dma->dma_base + MASK_SRC_TRAN);
                iowrite32(MASK_INTR_REG(midch->ch_id),
                        dma->dma_base + MASK_DST_TRAN);
                iowrite32(MASK_INTR_REG(midch->ch_id),
                        dma->dma_base + MASK_ERR);
                iowrite32(MASK_INTR_REG(midch->ch_id),
                        dma->dma_base + MASK_TFR);

                disable_dma_interrupt(midch);
                list_add_tail(&midch->chan.device_node, &dma->common.channels);
        }
        pr_debug("MDMA: Calc Mask as %x for this controller\n", dma->intr_mask);

        /* init dma structure */
        dma_cap_zero(dma->common.cap_mask);
        dma_cap_set(DMA_MEMCPY, dma->common.cap_mask);
        dma_cap_set(DMA_SLAVE, dma->common.cap_mask);
        dma_cap_set(DMA_PRIVATE, dma->common.cap_mask);
        dma->common.dev = &pdev->dev;

        dma->common.device_alloc_chan_resources =
                                        intel_mid_dma_alloc_chan_resources;
        dma->common.device_free_chan_resources =
                                        intel_mid_dma_free_chan_resources;

        dma->common.device_tx_status = intel_mid_dma_tx_status;
        dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy;
        dma->common.device_issue_pending = intel_mid_dma_issue_pending;
        dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg;
        dma->common.device_control = intel_mid_dma_device_control;

        /* enable dma cntrl */
        iowrite32(REG_BIT0, dma->dma_base + DMA_CFG);

        /* register irq */
        if (dma->pimr_mask) {
                pr_debug("MDMA:Requesting irq shared for DMAC1\n");
                err = request_irq(pdev->irq, intel_mid_dma_interrupt1,
                        IRQF_SHARED, "INTEL_MID_DMAC1", dma);
                if (0 != err)
                        goto err_irq;
        } else {
                dma->intr_mask = 0x03;
                pr_debug("MDMA:Requesting irq for DMAC2\n");
                err = request_irq(pdev->irq, intel_mid_dma_interrupt2,
                        IRQF_SHARED, "INTEL_MID_DMAC2", dma);
                if (0 != err)
                        goto err_irq;
        }
        /* register device w/ engine */
        err = dma_async_device_register(&dma->common);
        if (0 != err) {
                pr_err("ERR_MDMA:device_register failed: %d\n", err);
                goto err_engine;
        }
        if (dma->pimr_mask) {
                pr_debug("setting up tasklet1 for DMAC1\n");
                tasklet_init(&dma->tasklet, dma_tasklet1, (unsigned long)dma);
        } else {
                pr_debug("setting up tasklet2 for DMAC2\n");
                tasklet_init(&dma->tasklet, dma_tasklet2, (unsigned long)dma);
        }
        return 0;

err_engine:
        free_irq(pdev->irq, dma);
err_irq:
        if (dma->mask_reg)
                iounmap(dma->mask_reg);
err_ioremap:
        pci_pool_destroy(dma->dma_pool);
err_dma_pool:
        pr_err("ERR_MDMA:setup_dma failed: %d\n", err);
        return err;

}

/**
 * middma_shutdown - Shutdown the DMA controller
 * @pdev: Controller PCI device structure
 *
 * Called by remove;
 * Unregister the DMA controller, clear all structures and free the interrupt
 */
static void middma_shutdown(struct pci_dev *pdev)
{
        struct middma_device *device = pci_get_drvdata(pdev);

        dma_async_device_unregister(&device->common);
        pci_pool_destroy(device->dma_pool);
        if (device->mask_reg)
                iounmap(device->mask_reg);
        if (device->dma_base)
                iounmap(device->dma_base);
        free_irq(pdev->irq, device);
        return;
}

/**
 * intel_mid_dma_probe - PCI Probe
 * @pdev: Controller PCI device structure
 * @id: pci device id structure
 *
 * Initialize the PCI device, map BARs, query driver data.
 * Call setup_dma to complete controller and channel initialization
 */
static int __devinit intel_mid_dma_probe(struct pci_dev *pdev,
                                        const struct pci_device_id *id)
{
        struct middma_device *device;
        u32 base_addr, bar_size;
        struct intel_mid_dma_probe_info *info;
        int err;

        pr_debug("MDMA: probe for %x\n", pdev->device);
        info = (void *)id->driver_data;
        pr_debug("MDMA: CH %d, base %d, block len %d, Peripheral mask %x\n",
                                info->max_chan, info->ch_base,
                                info->block_size, info->pimr_mask);

        err = pci_enable_device(pdev);
        if (err)
                goto err_enable_device;

        err = pci_request_regions(pdev, "intel_mid_dmac");
        if (err)
                goto err_request_regions;

        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (err)
                goto err_set_dma_mask;

        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
        if (err)
                goto err_set_dma_mask;

        device = kzalloc(sizeof(*device), GFP_KERNEL);
        if (!device) {
                pr_err("ERR_MDMA:kzalloc failed probe\n");
                err = -ENOMEM;
                goto err_kzalloc;
        }
        device->pdev = pci_dev_get(pdev);

        base_addr = pci_resource_start(pdev, 0);
        bar_size = pci_resource_len(pdev, 0);
        device->dma_base = ioremap_nocache(base_addr, DMA_REG_SIZE);
        if (!device->dma_base) {
                pr_err("ERR_MDMA:ioremap failed\n");
                err = -ENOMEM;
                goto err_ioremap;
        }
        pci_set_drvdata(pdev, device);
        pci_set_master(pdev);
        device->max_chan = info->max_chan;
        device->chan_base = info->ch_base;
        device->block_size = info->block_size;
        device->pimr_mask = info->pimr_mask;

        err = mid_setup_dma(pdev);
        if (err)
                goto err_dma;

        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_allow(&pdev->dev);
        return 0;

err_dma:
        iounmap(device->dma_base);
err_ioremap:
        pci_dev_put(pdev);
        kfree(device);
err_kzalloc:
err_set_dma_mask:
        pci_release_regions(pdev);
        pci_disable_device(pdev);
err_request_regions:
err_enable_device:
        pr_err("ERR_MDMA:Probe failed %d\n", err);
        return err;
}

/**
 * intel_mid_dma_remove - PCI remove
 * @pdev: Controller PCI device structure
 *
 * Free up all resources and data
 * Call shutdown_dma to complete controller and channel cleanup
 */
static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
{
        struct middma_device *device = pci_get_drvdata(pdev);

        pm_runtime_get_noresume(&pdev->dev);
        pm_runtime_forbid(&pdev->dev);
        middma_shutdown(pdev);
        pci_dev_put(pdev);
        kfree(device);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
}

/* Power Management */
/*
 * dma_suspend - PCI suspend function
 *
 * @dev: device structure
 *
 * This function is called by the OS when a power event occurs
 */
static int dma_suspend(struct device *dev)
{
        struct pci_dev *pci = to_pci_dev(dev);
        int i;
        struct middma_device *device = pci_get_drvdata(pci);
        pr_debug("MDMA: dma_suspend called\n");

        for (i = 0; i < device->max_chan; i++) {
                if (device->ch[i].in_use)
                        return -EAGAIN;
        }
        dmac1_mask_periphral_intr(device);
        device->state = SUSPENDED;
        pci_save_state(pci);
        pci_disable_device(pci);
        pci_set_power_state(pci, PCI_D3hot);
        return 0;
}

/**
 * dma_resume - PCI resume function
 *
 * @dev: device structure
 *
 * This function is called by the OS when a power event occurs
 */
int dma_resume(struct device *dev)
{
        struct pci_dev *pci = to_pci_dev(dev);
        int ret;
        struct middma_device *device = pci_get_drvdata(pci);

        pr_debug("MDMA: dma_resume called\n");
        pci_set_power_state(pci, PCI_D0);
        pci_restore_state(pci);
        ret = pci_enable_device(pci);
        if (ret) {
                pr_err("MDMA: device can't be enabled for %x\n", pci->device);
                return ret;
        }
        device->state = RUNNING;
        iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
        return 0;
}

static int dma_runtime_suspend(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct middma_device *device = pci_get_drvdata(pci_dev);

        device->state = SUSPENDED;
        return 0;
}

static int dma_runtime_resume(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct middma_device *device = pci_get_drvdata(pci_dev);

        device->state = RUNNING;
        iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
        return 0;
}

static int dma_runtime_idle(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct middma_device *device = pci_get_drvdata(pdev);
        int i;

        for (i = 0; i < device->max_chan; i++) {
                if (device->ch[i].in_use)
                        return -EAGAIN;
        }

        return pm_schedule_suspend(dev, 0);
}

/******************************************************************************
* PCI stuff
*/
static struct pci_device_id intel_mid_dma_ids[] = {
        { PCI_VDEVICE(INTEL, INTEL_MID_DMAC1_ID),       INFO(2, 6, 4095, 0x200020)},
        { PCI_VDEVICE(INTEL, INTEL_MID_DMAC2_ID),       INFO(2, 0, 2047, 0)},
        { PCI_VDEVICE(INTEL, INTEL_MID_GP_DMAC2_ID),    INFO(2, 0, 2047, 0)},
        { PCI_VDEVICE(INTEL, INTEL_MFLD_DMAC1_ID),      INFO(4, 0, 4095, 0x400040)},
        { 0, }
};
MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids);

static const struct dev_pm_ops intel_mid_dma_pm = {
        .runtime_suspend = dma_runtime_suspend,
        .runtime_resume = dma_runtime_resume,
        .runtime_idle = dma_runtime_idle,
        .suspend = dma_suspend,
        .resume = dma_resume,
};

static struct pci_driver intel_mid_dma_pci_driver = {
        .name           =       "Intel MID DMA",
        .id_table       =       intel_mid_dma_ids,
        .probe          =       intel_mid_dma_probe,
        .remove         =       __devexit_p(intel_mid_dma_remove),
#ifdef CONFIG_PM
        .driver = {
                .pm = &intel_mid_dma_pm,
        },
#endif
};

static int __init intel_mid_dma_init(void)
{
        pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n",
                        INTEL_MID_DMA_DRIVER_VERSION);
        return pci_register_driver(&intel_mid_dma_pci_driver);
}
fs_initcall(intel_mid_dma_init);

static void __exit intel_mid_dma_exit(void)
{
        pci_unregister_driver(&intel_mid_dma_pci_driver);
}
module_exit(intel_mid_dma_exit);

MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
MODULE_DESCRIPTION("Intel (R) MID DMAC Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(INTEL_MID_DMA_DRIVER_VERSION);