/*
 * Topcliff PCH DMA controller driver
 * Copyright (c) 2010 Intel Corporation
 * Copyright (C) 2011 OKI SEMICONDUCTOR CO., LTD.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pch_dma.h>

#define DRV_NAME "pch-dma"

#define DMA_CTL0_DISABLE		0x0
#define DMA_CTL0_SG			0x1
#define DMA_CTL0_ONESHOT		0x2
#define DMA_CTL0_MODE_MASK_BITS		0x3
#define DMA_CTL0_DIR_SHIFT_BITS		2
#define DMA_CTL0_BITS_PER_CH		4

#define DMA_CTL2_START_SHIFT_BITS	8
#define DMA_CTL2_IRQ_ENABLE_MASK	((1UL << DMA_CTL2_START_SHIFT_BITS) - 1)

#define DMA_STATUS_IDLE			0x0
#define DMA_STATUS_DESC_READ		0x1
#define DMA_STATUS_WAIT			0x2
#define DMA_STATUS_ACCESS		0x3
#define DMA_STATUS_BITS_PER_CH		2
#define DMA_STATUS_MASK_BITS		0x3
#define DMA_STATUS_SHIFT_BITS		16
#define DMA_STATUS_IRQ(x)		(0x1 << (x))
#define DMA_STATUS0_ERR(x)		(0x1 << ((x) + 8))
#define DMA_STATUS2_ERR(x)		(0x1 << (x))

#define DMA_DESC_WIDTH_SHIFT_BITS	12
#define DMA_DESC_WIDTH_1_BYTE		(0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_2_BYTES		(0x2 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_4_BYTES		(0x0 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_MAX_COUNT_1_BYTE	0x3FF
#define DMA_DESC_MAX_COUNT_2_BYTES	0x3FF
#define DMA_DESC_MAX_COUNT_4_BYTES	0x7FF
#define DMA_DESC_END_WITHOUT_IRQ	0x0
#define DMA_DESC_END_WITH_IRQ		0x1
#define DMA_DESC_FOLLOW_WITHOUT_IRQ	0x2
#define DMA_DESC_FOLLOW_WITH_IRQ	0x3

#define MAX_CHAN_NR			12	/* must cover the 12-channel ML7213 DMA4 below */

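/*
 * Register layout notes, derived from the accessors below: CTL0 packs
 * four bits per channel for channels 0-7 (a 2-bit transfer mode plus a
 * direction bit at DMA_CTL0_DIR_SHIFT_BITS), and CTL3 repeats the same
 * packing for channels 8-11.  CTL2 carries the per-channel interrupt
 * enables; STS0 and STS2 report per-channel state.
 */
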
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");

struct pch_dma_desc_regs {
	u32	dev_addr;
	u32	mem_addr;
	u32	size;
	u32	next;
};

struct pch_dma_regs {
	u32	dma_ctl0;
	u32	dma_ctl1;
	u32	dma_ctl2;
	u32	dma_ctl3;
	u32	dma_sts0;
	u32	dma_sts1;
	u32	dma_sts2;
	u32	reserved3;
	struct pch_dma_desc_regs desc[MAX_CHAN_NR];
};

struct pch_dma_desc {
	struct pch_dma_desc_regs regs;
	struct dma_async_tx_descriptor txd;
	struct list_head	desc_node;
	struct list_head	tx_list;
};

struct pch_dma_chan {
	struct dma_chan		chan;
	void __iomem *membase;
	enum dma_data_direction	dir;
	struct tasklet_struct	tasklet;
	unsigned long		err_status;

	spinlock_t		lock;

	dma_cookie_t		completed_cookie;
	struct list_head	active_list;
	struct list_head	queue;
	struct list_head	free_list;
	unsigned int		descs_allocated;
};

#define PDC_DEV_ADDR	0x00
#define PDC_MEM_ADDR	0x04
#define PDC_SIZE	0x08
#define PDC_NEXT	0x0C

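/*
 * channel_readl()/channel_writel() access a channel's current
 * descriptor window (DEV_ADDR/MEM_ADDR/SIZE/NEXT); pd_chan->membase is
 * pointed at &regs->desc[i] for that channel in pch_dma_probe().
 */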
#define channel_readl(pdc, name) \
	readl((pdc)->membase + PDC_##name)
#define channel_writel(pdc, name, val) \
	writel((val), (pdc)->membase + PDC_##name)

struct pch_dma {
	struct dma_device	dma;
	void __iomem *membase;
	struct pci_pool		*pool;
	struct pch_dma_regs	regs;
	struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
	struct pch_dma_chan	channels[MAX_CHAN_NR];
};

#define PCH_DMA_CTL0	0x00
#define PCH_DMA_CTL1	0x04
#define PCH_DMA_CTL2	0x08
#define PCH_DMA_CTL3	0x0C
#define PCH_DMA_STS0	0x10
#define PCH_DMA_STS1	0x14
#define PCH_DMA_STS2	0x18

#define dma_readl(pd, name) \
	readl((pd)->membase + PCH_DMA_##name)
#define dma_writel(pd, name, val) \
	writel((val), (pd)->membase + PCH_DMA_##name)

static inline
struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct pch_dma_desc, txd);
}

static inline struct pch_dma_chan *to_pd_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pch_dma_chan, chan);
}

static inline struct pch_dma *to_pd(struct dma_device *ddev)
{
	return container_of(ddev, struct pch_dma, dma);
}

static inline struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static inline
struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->active_list,
				struct pch_dma_desc, desc_node);
}

static inline
struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->queue,
				struct pch_dma_desc, desc_node);
}

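/*
 * CTL2 interrupt enables: bits 0-7 cover channels 0-7; for channels
 * 8-11 the enable bit sits at chan_id + 8 (bits 16-19), beyond the
 * start-control bits suggested by DMA_CTL2_START_SHIFT_BITS.
 */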
static void pdc_enable_irq(struct dma_chan *chan, int enable)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	int pos;

	if (chan->chan_id < 8)
		pos = chan->chan_id;
	else
		pos = chan->chan_id + 8;

	val = dma_readl(pd, CTL2);

	if (enable)
		val |= 0x1 << pos;
	else
		val &= ~(0x1 << pos);

	dma_writel(pd, CTL2, val);

	dev_dbg(chan2dev(chan), "pdc_enable_irq: chan %d -> %x\n",
		chan->chan_id, val);
}

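/*
 * Channels 0-7 are configured through CTL0 and channels 8-11 through
 * CTL3; both registers use the same 4-bits-per-channel packing, so the
 * channel index is rebased to 0 for CTL3.
 */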
static void pdc_set_dir(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;

	if (chan->chan_id < 8) {
		val = dma_readl(pd, CTL0);

		if (pd_chan->dir == DMA_TO_DEVICE)
			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
				       DMA_CTL0_DIR_SHIFT_BITS);
		else
			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
					 DMA_CTL0_DIR_SHIFT_BITS));

		dma_writel(pd, CTL0, val);
	} else {
		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
		val = dma_readl(pd, CTL3);

		if (pd_chan->dir == DMA_TO_DEVICE)
			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
				       DMA_CTL0_DIR_SHIFT_BITS);
		else
			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
					 DMA_CTL0_DIR_SHIFT_BITS));

		dma_writel(pd, CTL3, val);
	}

	dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n",
		chan->chan_id, val);
}

static void pdc_set_mode(struct dma_chan *chan, u32 mode)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;

	if (chan->chan_id < 8) {
		val = dma_readl(pd, CTL0);

		val &= ~(DMA_CTL0_MODE_MASK_BITS <<
			 (DMA_CTL0_BITS_PER_CH * chan->chan_id));
		val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);

		dma_writel(pd, CTL0, val);
	} else {
		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */

		val = dma_readl(pd, CTL3);

		val &= ~(DMA_CTL0_MODE_MASK_BITS <<
			 (DMA_CTL0_BITS_PER_CH * ch));
		val |= mode << (DMA_CTL0_BITS_PER_CH * ch);

		dma_writel(pd, CTL3, val);
	}

	dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
		chan->chan_id, val);
}

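/*
 * Per-channel 2-bit state fields start at bit DMA_STATUS_SHIFT_BITS
 * (16) of STS0 for channels 0-7; STS2 uses the same layout for
 * channels 8-11, rebased to channel 0.
 */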
static u32 pdc_get_status0(struct pch_dma_chan *pd_chan)
{
	struct pch_dma *pd = to_pd(pd_chan->chan.device);
	u32 val;

	val = dma_readl(pd, STS0);
	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
			DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
}

static u32 pdc_get_status2(struct pch_dma_chan *pd_chan)
{
	struct pch_dma *pd = to_pd(pd_chan->chan.device);
	u32 val;

	val = dma_readl(pd, STS2);
	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
			DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8)));
}

static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
{
	u32 sts;

	if (pd_chan->chan.chan_id < 8)
		sts = pdc_get_status0(pd_chan);
	else
		sts = pdc_get_status2(pd_chan);

	return sts == DMA_STATUS_IDLE;
}

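/*
 * Start a transfer: a lone descriptor is written straight into the
 * channel registers and run in one-shot mode; a chained transfer only
 * loads NEXT with the first descriptor's physical address and lets the
 * controller walk the chain in scatter-gather mode.
 */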
static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc *desc)
{
	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: Attempt to start non-idle channel\n");
		return;
	}

	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n",
		pd_chan->chan.chan_id, desc->regs.dev_addr);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n",
		pd_chan->chan.chan_id, desc->regs.mem_addr);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> size: %x\n",
		pd_chan->chan.chan_id, desc->regs.size);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n",
		pd_chan->chan.chan_id, desc->regs.next);

	if (list_empty(&desc->tx_list)) {
		channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr);
		channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr);
		channel_writel(pd_chan, SIZE, desc->regs.size);
		channel_writel(pd_chan, NEXT, desc->regs.next);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT);
	} else {
		channel_writel(pd_chan, NEXT, desc->txd.phys);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);
	}
}

static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
			       struct pch_dma_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback = txd->callback;
	void *param = txd->callback_param;

	list_splice_init(&desc->tx_list, &pd_chan->free_list);
	list_move(&desc->desc_node, &pd_chan->free_list);

	if (callback)
		callback(param);
}

static void pdc_complete_all(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	BUG_ON(!pdc_is_idle(pd_chan));

	if (!list_empty(&pd_chan->queue))
		pdc_dostart(pd_chan, pdc_first_queued(pd_chan));

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &pd_chan->active_list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);
}

static void pdc_handle_error(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *bad_desc;

	bad_desc = pdc_first_active(pd_chan);
	list_del(&bad_desc->desc_node);

	list_splice_init(&pd_chan->queue, pd_chan->active_list.prev);

	if (!list_empty(&pd_chan->active_list))
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));

	dev_crit(chan2dev(&pd_chan->chan), "Bad descriptor submitted\n");
	dev_crit(chan2dev(&pd_chan->chan), "descriptor cookie: %d\n",
		 bad_desc->txd.cookie);

	pdc_chain_complete(pd_chan, bad_desc);
}

static void pdc_advance_work(struct pch_dma_chan *pd_chan)
{
	if (list_empty(&pd_chan->active_list) ||
	    list_is_singular(&pd_chan->active_list)) {
		pdc_complete_all(pd_chan);
	} else {
		pdc_chain_complete(pd_chan, pdc_first_active(pd_chan));
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));
	}
}

static dma_cookie_t pdc_assign_cookie(struct pch_dma_chan *pd_chan,
				      struct pch_dma_desc *desc)
{
	dma_cookie_t cookie = pd_chan->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	pd_chan->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}

static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct pch_dma_desc *desc = to_pd_desc(txd);
	struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
	dma_cookie_t cookie;

	spin_lock(&pd_chan->lock);
	cookie = pdc_assign_cookie(pd_chan, desc);

	if (list_empty(&pd_chan->active_list)) {
		list_add_tail(&desc->desc_node, &pd_chan->active_list);
		pdc_dostart(pd_chan, desc);
	} else {
		list_add_tail(&desc->desc_node, &pd_chan->queue);
	}

	spin_unlock(&pd_chan->lock);
	return cookie;	/* tx_submit must return the assigned cookie, not 0 */
}

static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
{
	struct pch_dma_desc *desc = NULL;
	struct pch_dma *pd = to_pd(chan->device);
	dma_addr_t addr;

	desc = pci_pool_alloc(pd->pool, flags, &addr);
	if (desc) {
		memset(desc, 0, sizeof(struct pch_dma_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = pd_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = addr;
	}

	return desc;
}

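/*
 * Descriptors are recycled through free_list; an entry may only be
 * reused once the client has ACKed it, so the scan skips un-ACKed
 * descriptors and falls back to allocating a fresh one from the pool.
 */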
static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	struct pch_dma_desc *ret = NULL;
	int i = 0;

	spin_lock(&pd_chan->lock);
	list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
	}
	spin_unlock(&pd_chan->lock);
	dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);

	if (!ret) {
		ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO);
		if (ret) {
			spin_lock(&pd_chan->lock);
			pd_chan->descs_allocated++;
			spin_unlock(&pd_chan->lock);
		} else {
			dev_err(chan2dev(&pd_chan->chan),
				"failed to alloc desc\n");
		}
	}

	return ret;
}

static void pdc_desc_put(struct pch_dma_chan *pd_chan,
			 struct pch_dma_desc *desc)
{
	if (desc) {
		spin_lock(&pd_chan->lock);
		list_splice_init(&desc->tx_list, &pd_chan->free_list);
		list_add(&desc->desc_node, &pd_chan->free_list);
		spin_unlock(&pd_chan->lock);
	}
}

static int pd_alloc_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc;
	LIST_HEAD(tmp_list);
	int i;

	if (!pdc_is_idle(pd_chan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	if (!list_empty(&pd_chan->free_list))
		return pd_chan->descs_allocated;

	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = pdc_alloc_desc(chan, GFP_KERNEL);

		if (!desc) {
			dev_warn(chan2dev(chan),
				 "Only allocated %d initial descriptors\n", i);
			break;
		}

		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irq(&pd_chan->lock);
	list_splice(&tmp_list, &pd_chan->free_list);
	pd_chan->descs_allocated = i;
	pd_chan->completed_cookie = chan->cookie = 1;
	spin_unlock_irq(&pd_chan->lock);

	pdc_enable_irq(chan, 1);

	return pd_chan->descs_allocated;
}

static void pd_free_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(tmp_list);

	BUG_ON(!pdc_is_idle(pd_chan));
	BUG_ON(!list_empty(&pd_chan->active_list));
	BUG_ON(!list_empty(&pd_chan->queue));

	spin_lock_irq(&pd_chan->lock);
	list_splice_init(&pd_chan->free_list, &tmp_list);
	pd_chan->descs_allocated = 0;
	spin_unlock_irq(&pd_chan->lock);

	list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
		pci_pool_free(pd->pool, desc, desc->txd.phys);

	pdc_enable_irq(chan, 0);
}

static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				    struct dma_tx_state *txstate)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_completed;
	int ret;

	spin_lock_irq(&pd_chan->lock);
	last_completed = pd_chan->completed_cookie;
	last_used = chan->cookie;
	spin_unlock_irq(&pd_chan->lock);

	ret = dma_async_is_complete(cookie, last_completed, last_used);

	dma_set_tx_state(txstate, last_completed, last_used, 0);

	return ret;
}

static void pd_issue_pending(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);

	if (pdc_is_idle(pd_chan)) {
		spin_lock(&pd_chan->lock);
		pdc_advance_work(pd_chan);
		spin_unlock(&pd_chan->lock);
	}
}

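/*
 * Client-side usage (a sketch, not code from this file): a peripheral
 * driver describes its FIFO in a struct pch_dma_slave, grabs a private
 * channel with a filter function that stores that description in
 * chan->private, and then drives the channel through the standard
 * dmaengine operations.  "filter", "slave", and "fifo_phys_addr" below
 * are hypothetical names a client would supply:
 *
 *	slave.tx_reg = fifo_phys_addr;
 *	slave.width = PCH_DMA_WIDTH_1_BYTE;
 *	chan = dma_request_channel(mask, filter, &slave);
 *	desc = chan->device->device_prep_slave_sg(chan, sgl, nents,
 *					DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
 *	cookie = desc->tx_submit(desc);
 *	chan->device->device_issue_pending(chan);
 */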
static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
			struct scatterlist *sgl, unsigned int sg_len,
			enum dma_data_direction direction, unsigned long flags)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_slave *pd_slave = chan->private;
	struct pch_dma_desc *first = NULL;
	struct pch_dma_desc *prev = NULL;
	struct pch_dma_desc *desc = NULL;
	struct scatterlist *sg;
	dma_addr_t reg;
	int i;

	if (unlikely(!sg_len)) {
		dev_info(chan2dev(chan), "prep_slave_sg: length is zero!\n");
		return NULL;
	}

	if (direction == DMA_FROM_DEVICE)
		reg = pd_slave->rx_reg;
	else if (direction == DMA_TO_DEVICE)
		reg = pd_slave->tx_reg;
	else
		return NULL;

	pd_chan->dir = direction;
	pdc_set_dir(chan);

	for_each_sg(sgl, sg, sg_len, i) {
		desc = pdc_desc_get(pd_chan);

		if (!desc)
			goto err_desc_get;

		desc->regs.dev_addr = reg;
		desc->regs.mem_addr = sg_phys(sg);
		desc->regs.size = sg_dma_len(sg);
		desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ;

		switch (pd_slave->width) {
		case PCH_DMA_WIDTH_1_BYTE:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_1_BYTE)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_1_BYTE;
			break;
		case PCH_DMA_WIDTH_2_BYTES:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_2_BYTES)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_2_BYTES;
			break;
		case PCH_DMA_WIDTH_4_BYTES:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_4_BYTES)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_4_BYTES;
			break;
		default:
			goto err_desc_get;
		}

		if (!first) {
			first = desc;
		} else {
			prev->regs.next |= desc->txd.phys;
			list_add_tail(&desc->desc_node, &first->tx_list);
		}

		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		desc->regs.next = DMA_DESC_END_WITH_IRQ;
	else
		desc->regs.next = DMA_DESC_END_WITHOUT_IRQ;

	first->txd.cookie = -EBUSY;
	desc->txd.flags = flags;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "failed to get desc or wrong parameters\n");
	pdc_desc_put(pd_chan, first);
	return NULL;
}

static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			     unsigned long arg)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	spin_lock_irq(&pd_chan->lock);

	pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);

	spin_unlock_irq(&pd_chan->lock);

	return 0;
}

static void pdc_tasklet(unsigned long data)
{
	struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data;
	unsigned long flags;

	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: handle non-idle channel in tasklet\n");
		return;
	}

	spin_lock_irqsave(&pd_chan->lock, flags);
	if (test_and_clear_bit(0, &pd_chan->err_status))
		pdc_handle_error(pd_chan);
	else
		pdc_advance_work(pd_chan);
	spin_unlock_irqrestore(&pd_chan->lock, flags);
}

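/*
 * Channels 0-7 report through STS0 and channels 8-11 through STS2.
 * Note the STS2 error test below indexes with the absolute channel
 * number, i.e. bits 8-11 (DMA_STATUS2_ERR has no built-in offset).
 * Serviced status words are written back to acknowledge the interrupt.
 */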
static irqreturn_t pd_irq(int irq, void *devid)
{
	struct pch_dma *pd = (struct pch_dma *)devid;
	struct pch_dma_chan *pd_chan;
	u32 sts0;
	u32 sts2;
	int i;
	int ret0 = IRQ_NONE;
	int ret2 = IRQ_NONE;

	sts0 = dma_readl(pd, STS0);
	sts2 = dma_readl(pd, STS2);

	dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0);

	for (i = 0; i < pd->dma.chancnt; i++) {
		pd_chan = &pd->channels[i];

		if (i < 8) {
			if (sts0 & DMA_STATUS_IRQ(i)) {
				if (sts0 & DMA_STATUS0_ERR(i))
					set_bit(0, &pd_chan->err_status);

				tasklet_schedule(&pd_chan->tasklet);
				ret0 = IRQ_HANDLED;
			}
		} else {
			if (sts2 & DMA_STATUS_IRQ(i - 8)) {
				if (sts2 & DMA_STATUS2_ERR(i))
					set_bit(0, &pd_chan->err_status);

				tasklet_schedule(&pd_chan->tasklet);
				ret2 = IRQ_HANDLED;
			}
		}
	}

	/* clear interrupt bits in status register */
	if (ret0)
		dma_writel(pd, STS0, sts0);
	if (ret2)
		dma_writel(pd, STS2, sts2);

	return ret0 | ret2;
}

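/*
 * Suspend/resume keeps shadow copies of CTL0-CTL3 and of each channel's
 * descriptor registers in struct pch_dma and replays them on resume,
 * since the device may have been powered off in between.
 */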
#ifdef CONFIG_PM
static void pch_dma_save_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	pd->regs.dma_ctl0 = dma_readl(pd, CTL0);
	pd->regs.dma_ctl1 = dma_readl(pd, CTL1);
	pd->regs.dma_ctl2 = dma_readl(pd, CTL2);
	pd->regs.dma_ctl3 = dma_readl(pd, CTL3);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		pd->ch_regs[i].dev_addr = channel_readl(pd_chan, DEV_ADDR);
		pd->ch_regs[i].mem_addr = channel_readl(pd_chan, MEM_ADDR);
		pd->ch_regs[i].size = channel_readl(pd_chan, SIZE);
		pd->ch_regs[i].next = channel_readl(pd_chan, NEXT);

		i++;
	}
}

static void pch_dma_restore_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	dma_writel(pd, CTL0, pd->regs.dma_ctl0);
	dma_writel(pd, CTL1, pd->regs.dma_ctl1);
	dma_writel(pd, CTL2, pd->regs.dma_ctl2);
	dma_writel(pd, CTL3, pd->regs.dma_ctl3);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		channel_writel(pd_chan, DEV_ADDR, pd->ch_regs[i].dev_addr);
		channel_writel(pd_chan, MEM_ADDR, pd->ch_regs[i].mem_addr);
		channel_writel(pd_chan, SIZE, pd->ch_regs[i].size);
		channel_writel(pd_chan, NEXT, pd->ch_regs[i].next);

		i++;
	}
}

static int pch_dma_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);

	if (pd)
		pch_dma_save_regs(pd);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int pch_dma_resume(struct pci_dev *pdev)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device(pdev);
	if (err) {
		dev_dbg(&pdev->dev, "failed to enable device\n");
		return err;
	}

	if (pd)
		pch_dma_restore_regs(pd);

	return 0;
}
#endif

static int __devinit pch_dma_probe(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	struct pch_dma *pd;
	struct pch_dma_regs *regs;
	unsigned int nr_channels;
	int err;
	int i;

	nr_channels = id->driver_data;
	pd = kzalloc(sizeof(struct pch_dma) +
		sizeof(struct pch_dma_chan) * nr_channels, GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pci_set_drvdata(pdev, pd);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device\n");
		goto err_free_mem;
	}

	if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Cannot find proper base address\n");
		err = -ENODEV;	/* don't fall through with err == 0 */
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "Cannot set proper DMA config\n");
		goto err_free_res;
	}

	regs = pd->membase = pci_iomap(pdev, 1, 0);
	if (!pd->membase) {
		dev_err(&pdev->dev, "Cannot map MMIO registers\n");
		err = -ENOMEM;
		goto err_free_res;
	}

	pci_set_master(pdev);

	err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
	if (err) {
		dev_err(&pdev->dev, "Failed to request IRQ\n");
		goto err_iounmap;
	}

	pd->pool = pci_pool_create("pch_dma_desc_pool", pdev,
				   sizeof(struct pch_dma_desc), 4, 0);
	if (!pd->pool) {
		dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n");
		err = -ENOMEM;
		goto err_free_irq;
	}

	pd->dma.dev = &pdev->dev;
	pd->dma.chancnt = nr_channels;

	INIT_LIST_HEAD(&pd->dma.channels);

	for (i = 0; i < nr_channels; i++) {
		struct pch_dma_chan *pd_chan = &pd->channels[i];

		pd_chan->chan.device = &pd->dma;
		pd_chan->chan.cookie = 1;
		pd_chan->chan.chan_id = i;

		pd_chan->membase = &regs->desc[i];

		spin_lock_init(&pd_chan->lock);

		INIT_LIST_HEAD(&pd_chan->active_list);
		INIT_LIST_HEAD(&pd_chan->queue);
		INIT_LIST_HEAD(&pd_chan->free_list);

		tasklet_init(&pd_chan->tasklet, pdc_tasklet,
			     (unsigned long)pd_chan);
		list_add_tail(&pd_chan->chan.device_node, &pd->dma.channels);
	}

	dma_cap_zero(pd->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, pd->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, pd->dma.cap_mask);

	pd->dma.device_alloc_chan_resources = pd_alloc_chan_resources;
	pd->dma.device_free_chan_resources = pd_free_chan_resources;
	pd->dma.device_tx_status = pd_tx_status;
	pd->dma.device_issue_pending = pd_issue_pending;
	pd->dma.device_prep_slave_sg = pd_prep_slave_sg;
	pd->dma.device_control = pd_device_control;

	err = dma_async_device_register(&pd->dma);
	if (err) {
		dev_err(&pdev->dev, "Failed to register DMA device\n");
		goto err_free_pool;
	}

	return 0;

err_free_pool:
	pci_pool_destroy(pd->pool);
err_free_irq:
	free_irq(pdev->irq, pd);
err_iounmap:
	pci_iounmap(pdev, pd->membase);
err_free_res:
	pci_release_regions(pdev);
err_disable_pdev:
	pci_disable_device(pdev);
err_free_mem:
	kfree(pd);	/* was leaked on every error path */
	return err;
}

static void __devexit pch_dma_remove(struct pci_dev *pdev)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;

	if (pd) {
		dma_async_device_unregister(&pd->dma);

		list_for_each_entry_safe(chan, _c, &pd->dma.channels,
					 device_node) {
			pd_chan = to_pd_chan(chan);

			tasklet_disable(&pd_chan->tasklet);
			tasklet_kill(&pd_chan->tasklet);
		}

		pci_pool_destroy(pd->pool);
		free_irq(pdev->irq, pd);
		pci_iounmap(pdev, pd->membase);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		kfree(pd);
	}
}

/* PCI Device ID of DMA device */
#define PCI_VENDOR_ID_ROHM		0x10DB
#define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH	0x8810
#define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH	0x8815
#define PCI_DEVICE_ID_ML7213_DMA1_8CH	0x8026
#define PCI_DEVICE_ID_ML7213_DMA2_8CH	0x802B
#define PCI_DEVICE_ID_ML7213_DMA3_4CH	0x8034
#define PCI_DEVICE_ID_ML7213_DMA4_12CH	0x8032
#define PCI_DEVICE_ID_ML7223_DMA1_4CH	0x800B
#define PCI_DEVICE_ID_ML7223_DMA2_4CH	0x800E
#define PCI_DEVICE_ID_ML7223_DMA3_4CH	0x8017
#define PCI_DEVICE_ID_ML7223_DMA4_4CH	0x803B

DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8 }, /* UART Video */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8 }, /* PCMIF SPI */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4 }, /* FPGA */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA4_12CH), 12 }, /* I2S */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA1_4CH), 4 }, /* UART */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4 }, /* Video SPI */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4 }, /* Security */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4 }, /* FPGA */
	{ 0, },
};

static struct pci_driver pch_dma_driver = {
	.name		= DRV_NAME,
	.id_table	= pch_dma_id_table,
	.probe		= pch_dma_probe,
	.remove		= __devexit_p(pch_dma_remove),
#ifdef CONFIG_PM
	.suspend	= pch_dma_suspend,
	.resume		= pch_dma_resume,
#endif
};

static int __init pch_dma_init(void)
{
	return pci_register_driver(&pch_dma_driver);
}

static void __exit pch_dma_exit(void)
{
	pci_unregister_driver(&pch_dma_driver);
}

module_init(pch_dma_init);
module_exit(pch_dma_exit);

MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH "
		   "DMA controller driver");
MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_LICENSE("GPL v2");