/*
 * drivers/base/dma-mapping.c - arch-independent dma-mapping routines
 *
 * Copyright (c) 2006 SUSE Linux Products GmbH
 * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
 *
 * This file is released under the GPLv2.
 */

#include <linux/acpi.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};

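/*
 * devres callbacks for the managed DMA allocations below: dmam_release()
 * frees the recorded allocation when the owning device is unbound, and
 * dmam_match() lets dmam_free_coherent() locate the devres entry that
 * corresponds to a given virtual address.
 */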
static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}

/**
 * dmam_alloc_coherent - Managed dma_alloc_coherent()
 * @dev: Device to allocate coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 *
 * Managed dma_alloc_coherent().  Memory allocated using this function
 * will be automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_coherent(struct device *dev, size_t size,
			  dma_addr_t *dma_handle, gfp_t gfp)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_coherent);
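
/*
 * Usage sketch: a hypothetical driver probe routine can allocate its DMA
 * ring through the managed helper and rely on devres for the release.
 * foo_probe, struct foo_priv and FOO_RING_BYTES are illustrative names,
 * not part of any real API.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_priv *priv = devm_kzalloc(&pdev->dev,
 *						     sizeof(*priv), GFP_KERNEL);
 *
 *		if (!priv)
 *			return -ENOMEM;
 *
 *		priv->ring = dmam_alloc_coherent(&pdev->dev, FOO_RING_BYTES,
 *						 &priv->ring_dma, GFP_KERNEL);
 *		if (!priv->ring)
 *			return -ENOMEM;
 *
 *		return 0;	// no explicit free: released on driver detach
 *	}
 */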

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);
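
/*
 * Usage sketch: a driver that needs to drop a managed buffer before it is
 * unbound (for example when resizing a ring at runtime) frees it explicitly;
 * the matching devres entry is destroyed at the same time.  The variable
 * names are illustrative.
 *
 *	dmam_free_coherent(&pdev->dev, old_size, old_ring, old_ring_dma);
 */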

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		       gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
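
/*
 * Usage sketch: allocating a managed, write-combined framebuffer;
 * DMA_ATTR_WRITE_COMBINE is one of the standard DMA_ATTR_* flags accepted
 * here, and fb, fb_dma and fb_size are illustrative names.
 *
 *	fb = dmam_alloc_attrs(&pdev->dev, fb_size, &fb_dma, GFP_KERNEL,
 *			      DMA_ATTR_WRITE_COMBINE);
 *	if (!fb)
 *		return -ENOMEM;
 */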

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT

static void dmam_coherent_decl_release(struct device *dev, void *res)
{
	dma_release_declared_memory(dev);
}

/**
 * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory()
 * @dev: Device to declare coherent memory for
 * @phys_addr: Physical address of coherent memory to be declared
 * @device_addr: Device address of coherent memory to be declared
 * @size: Size of coherent memory to be declared
 * @flags: Flags
 *
 * Managed dma_declare_coherent_memory().
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				 dma_addr_t device_addr, size_t size, int flags)
{
	void *res;
	int rc;

	res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size,
					 flags);
	if (!rc)
		devres_add(dev, res);
	else
		devres_free(res);

	return rc;
}
EXPORT_SYMBOL(dmam_declare_coherent_memory);
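
/*
 * Usage sketch: a driver with a dedicated on-chip SRAM window could declare
 * it as the device's coherent pool at probe time and let devres undo the
 * declaration on detach.  The sram resource and the DMA_MEMORY_EXCLUSIVE
 * flag are shown purely as an illustration and assume that flag is
 * available in the target tree.
 *
 *	ret = dmam_declare_coherent_memory(&pdev->dev, sram->start,
 *					   sram->start, resource_size(sram),
 *					   DMA_MEMORY_EXCLUSIVE);
 *	if (ret)
 *		return ret;
 */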

/**
 * dmam_release_declared_memory - Managed dma_release_declared_memory().
 * @dev: Device to release declared coherent memory for
 *
 * Managed dma_release_declared_memory().
 */
void dmam_release_declared_memory(struct device *dev)
{
	WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL));
}
EXPORT_SYMBOL(dmam_release_declared_memory);

#endif

/*
 * Create scatter-list for the already allocated DMA buffer.
 */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
			   void *cpu_addr, dma_addr_t handle, size_t size)
{
	struct page *page = virt_to_page(cpu_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}
EXPORT_SYMBOL(dma_common_get_sgtable);
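
/*
 * Usage sketch: architecture or IOMMU dma_map_ops implementations typically
 * forward their ->get_sgtable() hook here for buffers that come from the
 * normal coherent allocator; foo_get_sgtable and foo_dma_ops are
 * illustrative names.
 *
 *	static int foo_get_sgtable(struct device *dev, struct sg_table *sgt,
 *				   void *cpu_addr, dma_addr_t handle,
 *				   size_t size, unsigned long attrs)
 *	{
 *		return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size);
 *	}
 *
 *	static const struct dma_map_ops foo_dma_ops = {
 *		...
 *		.get_sgtable	= foo_get_sgtable,
 *	};
 */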

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;
#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
	unsigned long off = vma->vm_pgoff;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < count && user_count <= (count - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      user_count << PAGE_SHIFT,
				      vma->vm_page_prot);
	}
#endif	/* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */

	return ret;
}
EXPORT_SYMBOL(dma_common_mmap);
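
/*
 * Usage sketch: a character-device driver exporting its coherent buffer to
 * userspace would normally go through dma_mmap_coherent(), which lands here
 * when the architecture provides no special ->mmap handling; foo_mmap and
 * the foo_priv fields are illustrative.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_priv *priv = file->private_data;
 *
 *		return dma_mmap_coherent(priv->dev, vma, priv->buf,
 *					 priv->buf_dma, priv->buf_size);
 *	}
 */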

#ifdef CONFIG_MMU
static struct vm_struct *__dma_common_pages_remap(struct page **pages,
			size_t size, unsigned long vm_flags, pgprot_t prot,
			const void *caller)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, vm_flags, caller);
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area;
}

/*
 * remaps an array of PAGE_SIZE pages into another vm_area
 * Cannot be used in non-sleeping contexts
 */
void *dma_common_pages_remap(struct page **pages, size_t size,
			unsigned long vm_flags, pgprot_t prot,
			const void *caller)
{
	struct vm_struct *area;

	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
	if (!area)
		return NULL;

	area->pages = pages;

	return area->addr;
}
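
/*
 * Usage sketch: arch or IOMMU code that has assembled an array of pages and
 * needs a contiguous, possibly uncached kernel view of them pairs this
 * helper with dma_common_free_remap() below.  The VM_USERMAP flag, the
 * noncached protection and the __builtin_return_address(0) caller are shown
 * purely as an illustration.
 *
 *	vaddr = dma_common_pages_remap(pages, size, VM_USERMAP,
 *				       pgprot_noncached(PAGE_KERNEL),
 *				       __builtin_return_address(0));
 *	...
 *	dma_common_free_remap(vaddr, size, VM_USERMAP);
 */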

/*
 * remaps an allocated contiguous region into another vm_area.
 * Cannot be used in non-sleeping contexts
 */

void *dma_common_contiguous_remap(struct page *page, size_t size,
			unsigned long vm_flags,
			pgprot_t prot, const void *caller)
{
	int i;
	struct page **pages;
	struct vm_struct *area;

	pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0; i < (size >> PAGE_SHIFT); i++)
		pages[i] = nth_page(page, i);

	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);

	kfree(pages);

	if (!area)
		return NULL;
	return area->addr;
}
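
/*
 * Usage sketch: a coherent-allocation path that obtains a physically
 * contiguous block (for instance from CMA) and wants a write-combined
 * kernel mapping of it could use this wrapper; the flag and protection
 * values are illustrative.
 *
 *	vaddr = dma_common_contiguous_remap(page, size, VM_USERMAP,
 *					    pgprot_writecombine(PAGE_KERNEL),
 *					    __builtin_return_address(0));
 */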

/*
 * unmaps a range previously mapped by dma_common_*_remap
 */
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || (area->flags & vm_flags) != vm_flags) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size));
	vunmap(cpu_addr);
}
#endif

/*
 * Common configuration to enable DMA API use for a device
 */
#include <linux/pci.h>

int dma_configure(struct device *dev)
{
	struct device *bridge = NULL, *dma_dev = dev;
	enum dev_dma_attr attr;
	int ret = 0;

	if (dev_is_pci(dev)) {
		bridge = pci_get_host_bridge_device(to_pci_dev(dev));
		dma_dev = bridge;
		if (IS_ENABLED(CONFIG_OF) && dma_dev->parent &&
		    dma_dev->parent->of_node)
			dma_dev = dma_dev->parent;
	}

	if (dma_dev->of_node) {
		ret = of_dma_configure(dev, dma_dev->of_node);
	} else if (has_acpi_companion(dma_dev)) {
		attr = acpi_get_dma_attr(to_acpi_device_node(dma_dev->fwnode));
		if (attr != DEV_DMA_NOT_SUPPORTED)
			ret = acpi_dma_configure(dev, attr);
	}

	if (bridge)
		pci_put_host_bridge_device(bridge);

	return ret;
}
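
/*
 * Usage sketch: the driver core pairs these two calls around driver binding,
 * roughly as below (simplified; the real call sites live in the probe and
 * release paths of the driver core and bus code).
 *
 *	ret = dma_configure(dev);
 *	if (ret)
 *		return ret;
 *
 *	ret = drv->probe(dev);
 *	...
 *	dma_deconfigure(dev);
 */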

void dma_deconfigure(struct device *dev)
{
	of_dma_deconfigure(dev);
	acpi_dma_deconfigure(dev);
}