/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * DMA Coherent API Notes
 *
 * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
 * implemented by accessing it using a kernel virtual address, with
 * the Cache bit off in the TLB entry.
 *
 * By default, DMA address == physical address, which is 0x8000_0000 based.
 */
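
/*
 * Illustration (editor's sketch, not code from this file): on the default
 * setup described above there is no CPU-vs-DMA address translation, so the
 * plat_phys_to_dma()/plat_dma_to_phys() helpers used below conceptually
 * reduce to an identity mapping:
 *
 *	dma_addr_t plat_phys_to_dma(struct device *dev, phys_addr_t paddr)
 *	{
 *		return paddr;
 *	}
 *
 * Platforms that do translate between the two views override these helpers.
 */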

#include <linux/dma-mapping.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>


static void *arc_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	unsigned long order = get_order(size);
	struct page *page;
	phys_addr_t paddr;
	void *kvaddr;
	int need_coh = 1, need_kvaddr = 0;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * IOC relies on all data (even coherent DMA data) being in cache,
	 * so allocate normal cached memory.
	 *
	 * The gains with IOC are two-pronged:
	 *   -For streaming data, it elides the need for cache maintenance,
	 *    saving cycles in flush code and bus bandwidth, since otherwise
	 *    all the lines of a buffer would need to be flushed out to memory
	 *   -For coherent data, Read/Write to buffers terminate early in cache
	 *    (vs. always going to memory - thus are faster)
	 */
	if ((is_isa_arcv2() && ioc_enable) ||
	    (attrs & DMA_ATTR_NON_CONSISTENT))
		need_coh = 0;

	/*
	 * - A coherent buffer needs an MMU mapping to enforce non-cacheability
	 * - A highmem page needs a virtual handle (hence an MMU mapping)
	 *   independent of cacheability
	 */
	if (PageHighMem(page) || need_coh)
		need_kvaddr = 1;

	/* This is the linear addr (0x8000_0000 based) */
	paddr = page_to_phys(page);

	*dma_handle = plat_phys_to_dma(dev, paddr);

	/* This is the kernel virtual address (0x7000_0000 based) */
	if (need_kvaddr) {
		kvaddr = ioremap_nocache(paddr, size);
		if (kvaddr == NULL) {
			__free_pages(page, order);
			return NULL;
		}
	} else {
		kvaddr = (void *)(u32)paddr;
	}

	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yeah, this bit us - STAR 9000898266
	 *
	 * Although the core does call flush_cache_vmap(), it gets the kvaddr,
	 * hence can't be used to efficiently flush the L1 and/or L2, which
	 * need the paddr. Currently flush_cache_vmap() nukes the L1 cache
	 * completely; this will be optimized in a separate commit.
	 */
	if (need_coh)
		dma_cache_wback_inv(paddr, size);

	return kvaddr;
}
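
/*
 * Example (editor's sketch; "my_dev" and the size are hypothetical): drivers
 * never call arc_dma_alloc() directly - the generic DMA API dispatches here
 * through arc_dma_ops, defined at the bottom of this file:
 *
 *	void *buf;
 *	dma_addr_t handle;
 *
 *	buf = dma_alloc_coherent(my_dev, SZ_4K, &handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	... hand "handle" to the device, access the buffer via "buf" ...
 *	dma_free_coherent(my_dev, SZ_4K, buf, handle);
 */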

static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	phys_addr_t paddr = plat_dma_to_phys(dev, dma_handle);
	struct page *page = virt_to_page(paddr);
	int is_non_coh;

	is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
			(is_isa_arcv2() && ioc_enable);

	if (PageHighMem(page) || !is_non_coh)
		iounmap((void __force __iomem *)vaddr);

	__free_pages(page, get_order(size));
}

static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = __phys_to_pfn(plat_dma_to_phys(dev, dma_addr));
	unsigned long off = vma->vm_pgoff;
	int ret = -ENXIO;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < count && user_count <= (count - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      user_count << PAGE_SHIFT,
				      vma->vm_page_prot);
	}

	return ret;
}
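
/*
 * Example (editor's sketch; "my_dev", "buf" and "handle" are the
 * hypothetical results of the dma_alloc_coherent() call shown above):
 * a driver reaches arc_dma_mmap() from its file_operations ->mmap
 * handler via the generic helper:
 *
 *	static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(my_dev, vma, buf, handle,
 *					 vma->vm_end - vma->vm_start);
 *	}
 */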

/*
 * streaming DMA Mapping API...
 * The CPU accesses the page via its normal paddr, so the buffer needs to
 * be explicitly made consistent before each use.
 */
static void _dma_cache_sync(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;
	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;
	default:
		pr_err("Invalid DMA dir [%d] for OP @ %pa[p]\n", dir, &paddr);
	}
}

/*
 * arc_dma_map_page - map a portion of a page for streaming DMA
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_page().
 *
 * Note: while it takes a struct page as its arg, the caller can "abuse" it
 * to pass a region larger than PAGE_SIZE, provided it is physically
 * contiguous; this still works correctly.
 */
static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t paddr = page_to_phys(page) + offset;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		_dma_cache_sync(paddr, size, dir);

	return plat_phys_to_dma(dev, paddr);
}

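/*
 * Example (editor's sketch, hypothetical names): the streaming map/unmap
 * pair as a driver uses it for a CPU-filled buffer handed to a device:
 *
 *	dma_addr_t dma = dma_map_single(my_dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(my_dev, dma))
 *		return -ENOMEM;
 *	... device reads "len" bytes at "dma" ...
 *	dma_unmap_single(my_dev, dma, len, DMA_TO_DEVICE);
 */
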
/*
 * arc_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 *
 * Note: historically this routine was not implemented for ARC
 */
static void arc_dma_unmap_page(struct device *dev, dma_addr_t handle,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	phys_addr_t paddr = plat_dma_to_phys(dev, handle);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		_dma_cache_sync(paddr, size, dir);
}

static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
					      s->length, dir);

	return nents;
}

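/*
 * Example (editor's sketch, hypothetical names): streaming mapping of a
 * scatterlist; note dma_unmap_sg() takes the original nents, not the
 * value returned by dma_map_sg():
 *
 *	int n = dma_map_sg(my_dev, sgl, nents, DMA_TO_DEVICE);
 *	if (!n)
 *		return -EIO;
 *	... device consumes the "n" mapped segments ...
 *	dma_unmap_sg(my_dev, sgl, nents, DMA_TO_DEVICE);
 */
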
static void arc_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction dir,
			     unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		arc_dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir,
				   attrs);
}

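/*
 * Note: the direction passed to _dma_cache_sync() below is fixed by the
 * nature of the sync rather than by @dir: before the CPU reads the buffer,
 * stale lines must be invalidated (DMA_FROM_DEVICE); before the device
 * reads it, dirty lines must be written back (DMA_TO_DEVICE).
 */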
static void arc_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	_dma_cache_sync(plat_dma_to_phys(dev, dma_handle), size, DMA_FROM_DEVICE);
}

static void arc_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	_dma_cache_sync(plat_dma_to_phys(dev, dma_handle), size, DMA_TO_DEVICE);
}

static void arc_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sglist, int nelems,
		enum dma_data_direction dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i)
		_dma_cache_sync(sg_phys(sg), sg->length, dir);
}

static void arc_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sglist, int nelems,
		enum dma_data_direction dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i)
		_dma_cache_sync(sg_phys(sg), sg->length, dir);
}

static int arc_dma_supported(struct device *dev, u64 dma_mask)
{
	/* Support a 32-bit DMA mask exclusively */
	return dma_mask == DMA_BIT_MASK(32);
}

const struct dma_map_ops arc_dma_ops = {
	.alloc			= arc_dma_alloc,
	.free			= arc_dma_free,
	.mmap			= arc_dma_mmap,
	.map_page		= arc_dma_map_page,
	.unmap_page		= arc_dma_unmap_page,
	.map_sg			= arc_dma_map_sg,
	.unmap_sg		= arc_dma_unmap_sg,
	.sync_single_for_device	= arc_dma_sync_single_for_device,
	.sync_single_for_cpu	= arc_dma_sync_single_for_cpu,
	.sync_sg_for_cpu	= arc_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arc_dma_sync_sg_for_device,
	.dma_supported		= arc_dma_supported,
};
EXPORT_SYMBOL(arc_dma_ops);
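
/*
 * Wiring note (editor's sketch): the generic DMA API finds these ops via
 * the arch's get_arch_dma_ops() hook, which on ARC is expected to return
 * &arc_dma_ops, roughly:
 *
 *	static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 *	{
 *		return &arc_dma_ops;
 *	}
 *
 * so every dma_alloc_coherent()/dma_map_*() call on ARC lands in the
 * functions above.
 */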