/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

#include <asm/addrspace.h>
#include <asm/cacheflush.h>

void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
{
        /*
         * No need to sync an uncached area
         */
        if (PXSEG(vaddr) == P2SEG)
                return;

        switch (direction) {
        case DMA_FROM_DEVICE:           /* invalidate only */
                invalidate_dcache_region(vaddr, size);
                break;
        case DMA_TO_DEVICE:             /* writeback only */
                clean_dcache_region(vaddr, size);
                break;
        case DMA_BIDIRECTIONAL:         /* writeback and invalidate */
                flush_dcache_region(vaddr, size);
                break;
        default:
                BUG();
        }
}
EXPORT_SYMBOL(dma_cache_sync);

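/*
 * Allocate a physically contiguous 2^order block, then split it into
 * individual order-0 pages so that the tail beyond the requested size
 * can be handed back to the page allocator below.
 */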
static struct page *__dma_alloc(struct device *dev, size_t size,
                                dma_addr_t *handle, gfp_t gfp)
{
        struct page *page, *free, *end;
        int order;

        /* Following is a work-around (a.k.a. hack) to prevent pages
         * with __GFP_COMP being passed to split_page() which cannot
         * handle them.  The real problem is that this flag probably
         * should be 0 on AVR32 as it is not supported on this
         * platform--see CONFIG_HUGETLB_PAGE. */
        gfp &= ~(__GFP_COMP);

        size = PAGE_ALIGN(size);
        order = get_order(size);

        page = alloc_pages(gfp, order);
        if (!page)
                return NULL;
        split_page(page, order);

        /*
         * When accessing physical memory with valid cache data, we
         * get a cache hit even if the virtual memory region is marked
         * as uncached.
         *
         * Since the memory is newly allocated, there is no point in
         * doing a writeback. If the previous owner cares, he should
         * have flushed the cache before releasing the memory.
         */
        invalidate_dcache_region(phys_to_virt(page_to_phys(page)), size);

        *handle = page_to_bus(page);
        free = page + (size >> PAGE_SHIFT);
        end = page + (1 << order);

        /*
         * Free any unused pages
         */
        while (free < end) {
                __free_page(free);
                free++;
        }

        return page;
}

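/*
 * Release the pages backing a DMA allocation one page at a time; they
 * were turned into order-0 pages by split_page() in __dma_alloc().
 */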
static void __dma_free(struct device *dev, size_t size,
                       struct page *page, dma_addr_t handle)
{
        struct page *end = page + (PAGE_ALIGN(size) >> PAGE_SHIFT);

        while (page < end)
                __free_page(page++);
}

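/*
 * Allocate coherent DMA memory and return its address in the uncached
 * P2 segment, so CPU accesses bypass the data cache entirely.
 *
 * A minimal usage sketch (hypothetical driver code; "my_dev" and the
 * 4 KiB size are illustrative assumptions, not from this file):
 *
 *      dma_addr_t dma_handle;
 *      void *buf = dma_alloc_coherent(my_dev, 4096, &dma_handle,
 *                                     GFP_KERNEL);
 *      if (!buf)
 *              return -ENOMEM;
 *      ... program the device with dma_handle, access buf from the CPU ...
 *      dma_free_coherent(my_dev, 4096, buf, dma_handle);
 */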
void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *handle, gfp_t gfp)
{
        struct page *page;
        void *ret = NULL;

        page = __dma_alloc(dev, size, handle, gfp);
        if (page)
                ret = phys_to_uncached(page_to_phys(page));

        return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);

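/*
 * Convert the uncached P2 address back to its cached alias to recover
 * the struct page, then hand the pages back to __dma_free().
 */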
void dma_free_coherent(struct device *dev, size_t size,
                       void *cpu_addr, dma_addr_t handle)
{
        void *addr = phys_to_cached(uncached_to_phys(cpu_addr));
        struct page *page;

        pr_debug("dma_free_coherent addr %p (phys %08lx) size %u\n",
                 cpu_addr, (unsigned long)handle, (unsigned)size);
        BUG_ON(!virt_addr_valid(addr));
        page = virt_to_page(addr);
        __dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_coherent);

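/*
 * Like dma_alloc_coherent(), but instead of using the uncached P2
 * segment, the pages are remapped into P3 (kernel virtual space) with
 * the buffered bit set, enabling write-combining.
 */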
void *dma_alloc_writecombine(struct device *dev, size_t size,
                             dma_addr_t *handle, gfp_t gfp)
{
        struct page *page;
        dma_addr_t phys;

        page = __dma_alloc(dev, size, handle, gfp);
        if (!page)
                return NULL;

        phys = page_to_phys(page);
        *handle = phys;

        /* Now, map the page into P3 with write-combining turned on */
        return __ioremap(phys, size, _PAGE_BUFFER);
}
EXPORT_SYMBOL(dma_alloc_writecombine);

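/*
 * Tear down the write-combining P3 mapping created by
 * dma_alloc_writecombine(), then free the underlying pages.
 */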
void dma_free_writecombine(struct device *dev, size_t size,
                           void *cpu_addr, dma_addr_t handle)
{
        struct page *page;

        iounmap(cpu_addr);

        page = phys_to_page(handle);
        __dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_writecombine);