/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
	void		*virt_base;
	dma_addr_t	device_base;
	unsigned long	pfn_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
	spinlock_t	spinlock;
};

static bool dma_init_coherent_memory(
	phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
	struct dma_coherent_mem **mem)
{
	struct dma_coherent_mem *dma_mem = NULL;
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;

	if (flags & DMA_MEMORY_MAP)
		mem_base = memremap(phys_addr, size, MEMREMAP_WC);
	else
		mem_base = ioremap(phys_addr, size);
	if (!mem_base)
		goto out;

	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dma_mem)
		goto out;
	dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dma_mem->bitmap)
		goto out;

	dma_mem->virt_base = mem_base;
	dma_mem->device_base = device_addr;
	dma_mem->pfn_base = PFN_DOWN(phys_addr);
	dma_mem->size = pages;
	dma_mem->flags = flags;
	spin_lock_init(&dma_mem->spinlock);

	*mem = dma_mem;
	return true;

out:
	kfree(dma_mem);
	if (mem_base) {
		if (flags & DMA_MEMORY_MAP)
			memunmap(mem_base);
		else
			iounmap(mem_base);
	}
	return false;
}

static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
	if (!mem)
		return;

	if (mem->flags & DMA_MEMORY_MAP)
		memunmap(mem->virt_base);
	else
		iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
				      struct dma_coherent_mem *mem)
{
	if (dev->dma_mem)
		return -EBUSY;

	dev->dma_mem = mem;
	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	return 0;
}

int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	struct dma_coherent_mem *mem;

	if (!dma_init_coherent_memory(phys_addr, device_addr, size, flags,
				      &mem))
		return 0;

	if (dma_assign_coherent_memory(dev, mem) == 0)
		return flags & DMA_MEMORY_MAP ? DMA_MEMORY_MAP : DMA_MEMORY_IO;

	dma_release_coherent_memory(mem);
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
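
/*
 * Example (illustrative sketch, not part of this file): a driver that owns
 * a device-local memory window can declare it as the device's coherent
 * pool from its probe routine.  The driver name, addresses, and size are
 * hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		// 1 MiB of on-board RAM: CPU physical 0x90000000,
 *		// bus address 0x0 from the device's point of view
 *		if (!dma_declare_coherent_memory(&pdev->dev, 0x90000000, 0x0,
 *						 SZ_1M, DMA_MEMORY_MAP |
 *						 DMA_MEMORY_EXCLUSIVE))
 *			return -ENOMEM;
 *		...
 *	}
 *
 * Later dma_alloc_coherent() calls on &pdev->dev are then satisfied from
 * this pool (and, because of DMA_MEMORY_EXCLUSIVE, never fall back to
 * generic memory); dma_release_declared_memory() undoes the declaration.
 */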

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dma_release_coherent_memory(mem);
	dev->dma_mem = NULL;
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	unsigned long flags;
	int pos, err;

	size += device_addr & ~PAGE_MASK;

	if (!mem)
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&mem->spinlock, flags);
	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
	spin_unlock_irqrestore(&mem->spinlock, flags);

	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
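
/*
 * Example (illustrative sketch, not part of this file): a driver whose
 * hardware expects, say, a control block at a fixed bus address within the
 * declared region can pin that range so the allocator never hands it out.
 * The device and addresses are the hypothetical ones from the sketch
 * above.
 *
 *	void *cb = dma_mark_declared_memory_occupied(&pdev->dev, 0x0,
 *						     PAGE_SIZE);
 *	if (IS_ERR(cb))
 *		return PTR_ERR(cb);
 *	// cb is the kernel virtual address of the reserved page
 */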

/**
 * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
 *
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	This will be filled with the correct dma handle
 * @ret:	This pointer will be filled with the virtual address of the
 *		allocated area.
 *
 * This function should only be called from the per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
			    dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem;
	int order = get_order(size);
	unsigned long flags;
	int pageno;
	int dma_memory_map;

	if (!dev)
		return 0;
	mem = dev->dma_mem;
	if (!mem)
		return 0;

	*ret = NULL;
	spin_lock_irqsave(&mem->spinlock, flags);

	if (unlikely(size > (mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the per-device area.
	 */
	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
	*ret = mem->virt_base + (pageno << PAGE_SHIFT);
	dma_memory_map = (mem->flags & DMA_MEMORY_MAP);
	spin_unlock_irqrestore(&mem->spinlock, flags);
	if (dma_memory_map)
		memset(*ret, 0, size);
	else
		memset_io(*ret, 0, size);

	return 1;

err:
	spin_unlock_irqrestore(&mem->spinlock, flags);
	/*
	 * In the case where the allocation can not be satisfied from the
	 * per-device area, try to fall back to generic memory if the
	 * constraints allow it.
	 */
	return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
EXPORT_SYMBOL(dma_alloc_from_coherent);
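
/*
 * Example (illustrative sketch, not part of this file): as the comment
 * above says, this helper is meant to be called from the per-arch
 * dma_alloc_coherent() before the generic allocator is tried.  The
 * function name foo_dma_alloc_coherent() is hypothetical; the call
 * pattern follows from the return convention.
 *
 *	void *foo_dma_alloc_coherent(struct device *dev, size_t size,
 *				     dma_addr_t *dma_handle, gfp_t gfp)
 *	{
 *		void *ret;
 *
 *		if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
 *			return ret;	// pool hit, or NULL if the pool
 *					// is DMA_MEMORY_EXCLUSIVE and full
 *
 *		// fall back to the arch's generic coherent allocator
 *		...
 *	}
 */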

/**
 * dma_release_from_coherent() - try to free the memory allocated from a
 * per-device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller
 * should proceed with releasing memory from generic pools.
 */
int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long flags;

		spin_lock_irqsave(&mem->spinlock, flags);
		bitmap_release_region(mem->bitmap, page, order);
		spin_unlock_irqrestore(&mem->spinlock, flags);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(dma_release_from_coherent);
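
/*
 * Example (illustrative sketch, not part of this file): the mirror image
 * of the allocation path, again with a hypothetical per-arch function
 * name.
 *
 *	void foo_dma_free_coherent(struct device *dev, size_t size,
 *				   void *vaddr, dma_addr_t dma_handle)
 *	{
 *		if (dma_release_from_coherent(dev, get_order(size), vaddr))
 *			return;
 *
 *		// not from the per-device pool; free via the generic path
 *		...
 *	}
 */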

/**
 * dma_mmap_from_coherent() - try to mmap the memory allocated from a
 * per-device coherent memory pool to userspace
 * @dev:	device from which the memory was allocated
 * @vma:	vm_area for the userspace memory
 * @vaddr:	cpu address returned by dma_alloc_from_coherent
 * @size:	size of the memory buffer allocated by dma_alloc_from_coherent
 * @ret:	result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if we correctly mapped the memory, or 0 if the caller should
 * proceed with mapping memory from generic pools.
 */
int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		int user_count = vma_pages(vma);
		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			unsigned long pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(dma_mmap_from_coherent);
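
/*
 * Example (illustrative sketch, not part of this file): an arch's
 * dma_mmap implementation gives the per-device pool first refusal before
 * mapping from the generic coherent area.  The function name
 * foo_dma_mmap() is hypothetical.
 *
 *	int foo_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 *			 void *cpu_addr, dma_addr_t dma_addr, size_t size)
 *	{
 *		int ret;
 *
 *		if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
 *			return ret;	// handled, possibly with an error
 *
 *		// map from the generic coherent area instead
 *		...
 *	}
 */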

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	struct dma_coherent_mem *mem = rmem->priv;

	if (!mem &&
	    !dma_init_coherent_memory(rmem->base, rmem->base, rmem->size,
				      DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE,
				      &mem)) {
		pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
			&rmem->base, (unsigned long)rmem->size / SZ_1M);
		return -ENODEV;
	}
	rmem->priv = mem;
	dma_assign_coherent_memory(dev, mem);
	return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
	.device_init	= rmem_dma_device_init,
	.device_release	= rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL))
		return -EINVAL;

#ifdef CONFIG_ARM
	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
		pr_err("Reserved memory: regions without no-map are not yet supported\n");
		return -EINVAL;
	}
#endif

	rmem->ops = &rmem_dma_ops;
	pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}
RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
#endif
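
/*
 * Example (illustrative sketch, not part of this file): a device tree
 * fragment that creates such a pool and attaches it to a device.  Node
 * names, labels, and addresses are hypothetical; "shared-dma-pool" is the
 * compatible string matched by rmem_dma_setup() above, and the
 * memory-region property is what the reserved-memory framework uses to
 * bind the pool to the device (via of_reserved_mem_device_init(), which
 * ends up calling rmem_dma_device_init()).
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		dma_pool: dma_pool@78000000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x78000000 0x800000>;	// 8 MiB
 *			no-map;				// required on ARM,
 *							// see rmem_dma_setup()
 *		};
 *	};
 *
 *	foo@40000000 {
 *		...
 *		memory-region = <&dma_pool>;
 *	};
 */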