/*
** PARISC 1.1 Dynamic DMA mapping support.
** This implementation is for PA-RISC platforms that do not support
** I/O TLBs (aka DMA address translation hardware).
** See Documentation/DMA-API-HOWTO.txt for interface definitions.
**
** (c) Copyright 1999,2000 Hewlett-Packard Company
** (c) Copyright 2000 Grant Grundler
** (c) Copyright 2000 Philipp Rumpf <prumpf@tux.org>
** (c) Copyright 2000 John Marvin
**
** "leveraged" from 2.3.47: arch/ia64/kernel/pci-dma.c.
** (I assume it's from David Mosberger-Tang but there was no Copyright)
**
** AFAIK, all PA7100LC and PA7300LC platforms can use this code.
**
** - ggg
*/

#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/export.h>

#include <asm/cacheflush.h>
#include <asm/dma.h>		/* for DMA_CHUNK_SIZE */
#include <asm/io.h>
#include <asm/page.h>		/* get_order */
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>	/* for purge_tlb_*() macros */

static struct proc_dir_entry * proc_gsc_root __read_mostly = NULL;
static unsigned long pcxl_used_bytes __read_mostly = 0;
static unsigned long pcxl_used_pages __read_mostly = 0;

extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */
static spinlock_t pcxl_res_lock;
static char *pcxl_res_map;
static int pcxl_res_hint;
static int pcxl_res_size;

#ifdef DEBUG_PCXL_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif

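/*
** pcxl_res_map is a bitmap over the uncached DMA mapping area: one bit
** per page, so pcxl_res_size bytes track pcxl_res_size << 3 pages.
** pcxl_res_hint remembers where the last allocation ended so the next
** search can start there instead of rescanning from the beginning.
*/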

/*
** Dump a hex representation of the resource map.
*/

#ifdef DUMP_RESMAP
static
void dump_resmap(void)
{
	u_long *res_ptr = (unsigned long *)pcxl_res_map;
	u_long i = 0;

	printk("res_map: ");
	for(; i < (pcxl_res_size / sizeof(unsigned long)); ++i, ++res_ptr)
		printk("%08lx ", *res_ptr);

	printk("\n");
}
#else
static inline void dump_resmap(void) {;}
#endif

static int pa11_dma_supported(struct device *dev, u64 mask)
{
	return 1;
}

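/*
** The map_*_uncached() helpers walk (and allocate) kernel page tables
** for [vaddr, vaddr + size) and install uncached (PAGE_KERNEL_UNC) PTEs
** mapping the physical range starting at *paddr_ptr, purging the TLB
** entry for each page as it goes.
*/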
static inline int map_pte_uncached(pte_t * pte,
		unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		unsigned long flags;

		if (!pte_none(*pte))
			printk(KERN_ERR "map_pte_uncached: page already exists\n");
		purge_tlb_start(flags);
		set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
		pdtlb_kernel(orig_vaddr);
		purge_tlb_end(flags);
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		(*paddr_ptr) += PAGE_SIZE;
		pte++;
	} while (vaddr < end);
	return 0;
}

static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		pte_t * pte = pte_alloc_kernel(pmd, vaddr);
		if (!pte)
			return -ENOMEM;
		if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr))
			return -ENOMEM;
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
	return 0;
}

static inline int map_uncached_pages(unsigned long vaddr, unsigned long size,
		unsigned long paddr)
{
	pgd_t * dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		pmd_t *pmd;

		pmd = pmd_alloc(NULL, dir, vaddr);
		if (!pmd)
			return -ENOMEM;
		if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
			return -ENOMEM;
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
	return 0;
}

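/*
** The unmap_*_uncached() helpers mirror the walk above: each PTE is
** cleared and its TLB entry purged.  The page tables themselves are
** left in place.
*/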
static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
		unsigned long size)
{
	pte_t * pte;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset_map(pmd, vaddr);
	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		unsigned long flags;
		pte_t page = *pte;

		pte_clear(&init_mm, vaddr, pte);
		purge_tlb_start(flags);
		pdtlb_kernel(orig_vaddr);
		purge_tlb_end(flags);
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		pte++;
		if (pte_none(page) || pte_present(page))
			continue;
		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
	} while (vaddr < end);
}

static inline void unmap_uncached_pmd(pgd_t * dir, unsigned long vaddr,
		unsigned long size)
{
	pmd_t * pmd;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, vaddr);
	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		unmap_uncached_pte(pmd, orig_vaddr, end - vaddr);
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
}

static void unmap_uncached_pages(unsigned long vaddr, unsigned long size)
{
	pgd_t * dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		unmap_uncached_pmd(dir, vaddr, end - vaddr);
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
}

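/*
** Resource map search: scan the bitmap in 8/16/32-bit chunks for an
** aligned chunk in which every bit covered by the request mask is
** clear, claim those bits, and record where the allocation ended in
** pcxl_res_hint.  The search starts at the hint and, if that fails,
** retries once from the start of the map before giving up.
*/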
#define PCXL_SEARCH_LOOP(idx, mask, size)  \
	for(; res_ptr < res_end; ++res_ptr) \
	{ \
		if(0 == ((*res_ptr) & mask)) { \
			*res_ptr |= mask; \
			idx = (int)((u_long)res_ptr - (u_long)pcxl_res_map); \
			pcxl_res_hint = idx + (size >> 3); \
			goto resource_found; \
		} \
	}

#define PCXL_FIND_FREE_MAPPING(idx, mask, size)  { \
	u##size *res_ptr = (u##size *)&(pcxl_res_map[pcxl_res_hint & ~((size >> 3) - 1)]); \
	u##size *res_end = (u##size *)&pcxl_res_map[pcxl_res_size]; \
	PCXL_SEARCH_LOOP(idx, mask, size); \
	res_ptr = (u##size *)&pcxl_res_map[0]; \
	PCXL_SEARCH_LOOP(idx, mask, size); \
}

unsigned long
pcxl_alloc_range(size_t size)
{
	int res_idx;
	u_long mask, flags;
	unsigned int pages_needed = size >> PAGE_SHIFT;

	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_needed;

	DBG_RES("pcxl_alloc_range() size: %d pages_needed %d pages_mask 0x%08lx\n",
		size, pages_needed, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if(pages_needed <= 8) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 8);
	} else if(pages_needed <= 16) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 16);
	} else if(pages_needed <= 32) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_alloc_range() Too many pages to map.\n",
		      __FILE__);
	}

	dump_resmap();
	panic("%s: pcxl_alloc_range() out of dma mapping resources\n",
	      __FILE__);

resource_found:

	DBG_RES("pcxl_alloc_range() res_idx %d mask 0x%08lx res_hint: %d\n",
		res_idx, mask, pcxl_res_hint);

	pcxl_used_pages += pages_needed;
	pcxl_used_bytes += ((pages_needed >> 3) ? (pages_needed >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();

	/*
	** return the corresponding vaddr in the pcxl dma map
	*/
	return (pcxl_dma_start + (res_idx << (PAGE_SHIFT + 3)));
}

#define PCXL_FREE_MAPPINGS(idx, m, size) \
		u##size *res_ptr = (u##size *)&(pcxl_res_map[(idx) + (((size >> 3) - 1) & (~((size >> 3) - 1)))]); \
		/* BUG_ON((*res_ptr & m) != m); */ \
		*res_ptr &= ~m;

/*
** clear bits in the pcxl resource map
*/
static void
pcxl_free_range(unsigned long vaddr, size_t size)
{
	u_long mask, flags;
	unsigned int res_idx = (vaddr - pcxl_dma_start) >> (PAGE_SHIFT + 3);
	unsigned int pages_mapped = size >> PAGE_SHIFT;

	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_mapped;

	DBG_RES("pcxl_free_range() res_idx: %d size: %d pages_mapped %d mask 0x%08lx\n",
		res_idx, size, pages_mapped, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if(pages_mapped <= 8) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 8);
	} else if(pages_mapped <= 16) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 16);
	} else if(pages_mapped <= 32) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_free_range() Too many pages to unmap.\n",
		      __FILE__);
	}

	pcxl_used_pages -= (pages_mapped ? pages_mapped : 1);
	pcxl_used_bytes -= ((pages_mapped >> 3) ? (pages_mapped >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();
}

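/*
** /proc/gsc/pcxl_dma reports the size of the mapping area and how much
** of the resource bitmap is in use, both in bytes and in pages.
*/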
static int proc_pcxl_dma_show(struct seq_file *m, void *v)
{
#if 0
	u_long i = 0;
	unsigned long *res_ptr = (u_long *)pcxl_res_map;
#endif
	unsigned long total_pages = pcxl_res_size << 3;	/* 8 bits per byte */

	seq_printf(m, "\nDMA Mapping Area size : %d bytes (%ld pages)\n",
		PCXL_DMA_MAP_SIZE, total_pages);

	seq_printf(m, "Resource bitmap : %d bytes\n", pcxl_res_size);

	seq_puts(m, "     total:    free:    used:  % used:\n");
	seq_printf(m, "blocks %8d %8ld %8ld %8ld%%\n", pcxl_res_size,
		pcxl_res_size - pcxl_used_bytes, pcxl_used_bytes,
		(pcxl_used_bytes * 100) / pcxl_res_size);

	seq_printf(m, "pages  %8ld %8ld %8ld %8ld%%\n", total_pages,
		total_pages - pcxl_used_pages, pcxl_used_pages,
		(pcxl_used_pages * 100 / total_pages));

#if 0
	seq_puts(m, "\nResource bitmap:");

	for(; i < (pcxl_res_size / sizeof(u_long)); ++i, ++res_ptr) {
		if ((i & 7) == 0)
			seq_puts(m, "\n   ");
		seq_printf(m, "%s %08lx", buf, *res_ptr);
	}
#endif
	seq_putc(m, '\n');
	return 0;
}

static int proc_pcxl_dma_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_pcxl_dma_show, NULL);
}

static const struct file_operations proc_pcxl_dma_ops = {
	.owner		= THIS_MODULE,
	.open		= proc_pcxl_dma_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

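/*
** Set up the resource bitmap and /proc entries.  On machines where the
** boot code left pcxl_dma_start at 0 there is no pcxl mapping area and
** this init is a no-op.
*/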
static int __init
pcxl_dma_init(void)
{
	if (pcxl_dma_start == 0)
		return 0;

	spin_lock_init(&pcxl_res_lock);
	pcxl_res_size = PCXL_DMA_MAP_SIZE >> (PAGE_SHIFT + 3);
	pcxl_res_hint = 0;
	pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL,
					    get_order(pcxl_res_size));
	memset(pcxl_res_map, 0, pcxl_res_size);
	proc_gsc_root = proc_mkdir("gsc", NULL);
	if (!proc_gsc_root)
		printk(KERN_WARNING
			"pcxl_dma_init: Unable to create gsc /proc dir entry\n");
	else {
		struct proc_dir_entry* ent;
		ent = proc_create("pcxl_dma", 0, proc_gsc_root,
				  &proc_pcxl_dma_ops);
		if (!ent)
			printk(KERN_WARNING
				"pci-dma.c: Unable to create pcxl_dma /proc entry.\n");
	}
	return 0;
}

__initcall(pcxl_dma_init);

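/*
** pa11_dma_alloc() builds a coherent buffer in two steps: grab ordinary
** pages for the backing store, flush them from the data cache, then map
** the same physical pages uncached at a vaddr from pcxl_alloc_range().
** The device is handed the physical address while the CPU uses the
** uncached virtual address, so the two never disagree through the cache.
*/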
static void *pa11_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	unsigned long vaddr;
	unsigned long paddr;
	int order;

	order = get_order(size);
	size = 1 << (order + PAGE_SHIFT);
	vaddr = pcxl_alloc_range(size);
	paddr = __get_free_pages(flag, order);
	flush_kernel_dcache_range(paddr, size);
	paddr = __pa(paddr);
	map_uncached_pages(vaddr, size, paddr);
	*dma_handle = (dma_addr_t) paddr;

#if 0
/* This probably isn't needed to support EISA cards.
** ISA cards will certainly only support 24-bit DMA addressing.
** Not clear if we can, want, or need to support ISA.
*/
	if (!dev || *dev->coherent_dma_mask < 0xffffffff)
		gfp |= GFP_DMA;
#endif
	return (void *)vaddr;
}

static void pa11_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	int order;

	order = get_order(size);
	size = 1 << (order + PAGE_SHIFT);
	unmap_uncached_pages((unsigned long)vaddr, size);
	pcxl_free_range((unsigned long)vaddr, size);
	free_pages((unsigned long)__va(dma_handle), order);
}

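/*
** Streaming mappings need no translation hardware to be programmed on
** these machines: map_page just flushes the buffer out of the data
** cache and returns its physical address for the device to use.
*/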
static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction direction, unsigned long attrs)
{
	void *addr = page_address(page) + offset;
	BUG_ON(direction == DMA_NONE);

	flush_kernel_dcache_range((unsigned long) addr, size);
	return virt_to_phys(addr);
}

static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction direction,
		unsigned long attrs)
{
	BUG_ON(direction == DMA_NONE);

	if (direction == DMA_TO_DEVICE)
		return;

	/*
	 * For PCI_DMA_FROMDEVICE this flush is not necessary for the
	 * simple map/unmap case. However, it IS necessary if
	 * pci_dma_sync_single_* has been called and the buffer reused.
	 */

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
	return;
}

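/*
** The scatter-gather paths apply the same per-buffer flushing to each
** list entry; entries are never merged (see the "combining" comments
** below).
*/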
static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist,
		int nents, enum dma_data_direction direction,
		unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sglist, sg, nents, i) {
		unsigned long vaddr = (unsigned long)sg_virt(sg);

		sg_dma_address(sg) = (dma_addr_t) virt_to_phys(vaddr);
		sg_dma_len(sg) = sg->length;
		flush_kernel_dcache_range(vaddr, sg->length);
	}
	return nents;
}

static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
		int nents, enum dma_data_direction direction,
		unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	if (direction == DMA_TO_DEVICE)
		return;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for_each_sg(sglist, sg, nents, i)
		flush_kernel_vmap_range(sg_virt(sg), sg->length);
	return;
}

static void pa11_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
			size);
}

static void pa11_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
			size);
}

static void pa11_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sglist, int nents,
		enum dma_data_direction direction)
{
	int i;
	struct scatterlist *sg;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for_each_sg(sglist, sg, nents, i)
		flush_kernel_vmap_range(sg_virt(sg), sg->length);
}

static void pa11_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sglist, int nents,
		enum dma_data_direction direction)
{
	int i;
	struct scatterlist *sg;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for_each_sg(sglist, sg, nents, i)
		flush_kernel_vmap_range(sg_virt(sg), sg->length);
}

struct dma_map_ops pcxl_dma_ops = {
	.dma_supported =	pa11_dma_supported,
	.alloc =		pa11_dma_alloc,
	.free =			pa11_dma_free,
	.map_page =		pa11_dma_map_page,
	.unmap_page =		pa11_dma_unmap_page,
	.map_sg =		pa11_dma_map_sg,
	.unmap_sg =		pa11_dma_unmap_sg,
	.sync_single_for_cpu =	pa11_dma_sync_single_for_cpu,
	.sync_single_for_device = pa11_dma_sync_single_for_device,
	.sync_sg_for_cpu =	pa11_dma_sync_sg_for_cpu,
	.sync_sg_for_device =	pa11_dma_sync_sg_for_device,
};

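/*
** Machines without the pcxl uncached mapping area can't back a truly
** consistent allocation, so pcx_dma_alloc() only honors callers that
** explicitly accept non-consistent memory (DMA_ATTR_NON_CONSISTENT);
** everything else gets NULL.
*/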
static void *pcx_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	void *addr;

	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0)
		return NULL;

	addr = (void *)__get_free_pages(flag, get_order(size));
	if (addr)
		*dma_handle = (dma_addr_t)virt_to_phys(addr);

	return addr;
}

static void pcx_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t iova, unsigned long attrs)
{
	free_pages((unsigned long)vaddr, get_order(size));
	return;
}

struct dma_map_ops pcx_dma_ops = {
	.dma_supported =	pa11_dma_supported,
	.alloc =		pcx_dma_alloc,
	.free =			pcx_dma_free,
	.map_page =		pa11_dma_map_page,
	.unmap_page =		pa11_dma_unmap_page,
	.map_sg =		pa11_dma_map_sg,
	.unmap_sg =		pa11_dma_unmap_sg,
	.sync_single_for_cpu =	pa11_dma_sync_single_for_cpu,
	.sync_single_for_device = pa11_dma_sync_single_for_device,
	.sync_sg_for_cpu =	pa11_dma_sync_sg_for_cpu,
	.sync_sg_for_device =	pa11_dma_sync_sg_for_device,
};
1da177e4 | 613 | }; |