// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Managed DMA API
 */
struct dma_devres {
        size_t          size;
        void            *vaddr;
        dma_addr_t      dma_handle;
        unsigned long   attrs;
};

static void dmam_release(struct device *dev, void *res)
{
        struct dma_devres *this = res;

        dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
                        this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
        struct dma_devres *this = res, *match = match_data;

        if (this->vaddr == match->vaddr) {
                WARN_ON(this->size != match->size ||
                        this->dma_handle != match->dma_handle);
                return 1;
        }
        return 0;
}

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
                        dma_addr_t dma_handle)
{
        struct dma_devres match_data = { size, vaddr, dma_handle };

        dma_free_coherent(dev, size, vaddr, dma_handle);
        WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);

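/*
 * Illustrative usage sketch, not part of the original file: a driver only
 * needs dmam_free_coherent() when it wants to drop a managed allocation
 * early, before the automatic release on detach.  The "priv" fields below
 * are hypothetical stand-ins for the result of an earlier dmam_alloc_attrs()
 * call:
 *
 *      dmam_free_coherent(dev, priv->buf_len, priv->buf, priv->buf_dma);
 *      priv->buf = NULL;
 */
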
/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate non-coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp, unsigned long attrs)
{
        struct dma_devres *dr;
        void *vaddr;

        dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
        if (!dr)
                return NULL;

        vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
        if (!vaddr) {
                devres_free(dr);
                return NULL;
        }

        dr->vaddr = vaddr;
        dr->dma_handle = *dma_handle;
        dr->size = size;
        dr->attrs = attrs;

        devres_add(dev, dr);

        return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);

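/*
 * Illustrative usage sketch, not part of the original file: a typical
 * probe() path allocating a coherent buffer whose lifetime is tied to the
 * driver binding.  "struct foo_priv" and FOO_BUF_SIZE are hypothetical:
 *
 *      struct foo_priv *priv = dev_get_drvdata(dev);
 *
 *      priv->buf = dmam_alloc_attrs(dev, FOO_BUF_SIZE, &priv->buf_dma,
 *                                   GFP_KERNEL, 0);
 *      if (!priv->buf)
 *              return -ENOMEM;
 *
 * No explicit free is required; devres releases the buffer on detach.
 */
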
/*
 * Create scatter-list for the already allocated DMA buffer.
 */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        struct page *page;
        int ret;

        if (!dev_is_dma_coherent(dev)) {
                if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
                        return -ENXIO;

                page = pfn_to_page(arch_dma_coherent_to_pfn(dev, cpu_addr,
                                dma_addr));
        } else {
                page = virt_to_page(cpu_addr);
        }

        ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
        if (!ret)
                sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
        return ret;
}

int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (!dma_is_direct(ops) && ops->get_sgtable)
                return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
                                        attrs);
        return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
                        attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);

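/*
 * Illustrative usage sketch, not part of the original file: building a
 * single-entry scatterlist for an existing coherent allocation, e.g. to
 * hand the buffer to another subsystem.  "vaddr", "dma_addr" and "len" are
 * assumed to come from a prior dma_alloc_attrs() call:
 *
 *      struct sg_table sgt;
 *      int ret;
 *
 *      ret = dma_get_sgtable_attrs(dev, &sgt, vaddr, dma_addr, len, 0);
 *      if (ret)
 *              return ret;
 *      ...
 *      sg_free_table(&sgt);
 */
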
/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
        unsigned long user_count = vma_pages(vma);
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long off = vma->vm_pgoff;
        unsigned long pfn;
        int ret = -ENXIO;

        vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);

        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        if (off >= count || user_count > count - off)
                return -ENXIO;

        if (!dev_is_dma_coherent(dev)) {
                if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
                        return -ENXIO;
                pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
        } else {
                pfn = page_to_pfn(virt_to_page(cpu_addr));
        }

        return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
                        user_count << PAGE_SHIFT, vma->vm_page_prot);
#else
        return -ENXIO;
#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
}

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (!dma_is_direct(ops) && ops->mmap)
                return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
        return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);

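/*
 * Illustrative usage sketch, not part of the original file: a character
 * device driver can forward its mmap() file operation to dma_mmap_attrs()
 * so that user space can map a buffer obtained from dma_alloc_attrs().
 * "struct foo_priv" and its fields are hypothetical:
 *
 *      static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              struct foo_priv *priv = file->private_data;
 *
 *              return dma_mmap_attrs(priv->dev, vma, priv->buf,
 *                                    priv->buf_dma, priv->buf_len, 0);
 *      }
 *
 * As the kernel-doc above notes, the buffer must stay allocated until the
 * user space mapping has been torn down.
 */
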
static u64 dma_default_get_required_mask(struct device *dev)
{
        u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
        u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
        u64 mask;

        if (!high_totalram) {
                /* convert to mask just covering totalram */
                low_totalram = (1 << (fls(low_totalram) - 1));
                low_totalram += low_totalram - 1;
                mask = low_totalram;
        } else {
                high_totalram = (1 << (fls(high_totalram) - 1));
                high_totalram += high_totalram - 1;
                mask = (((u64)high_totalram) << 32) + 0xffffffff;
        }
        return mask;
}

u64 dma_get_required_mask(struct device *dev)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_is_direct(ops))
                return dma_direct_get_required_mask(dev);
        if (ops->get_required_mask)
                return ops->get_required_mask(dev);
        return dma_default_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev)       (true)
#endif

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t flag, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        void *cpu_addr;

        WARN_ON_ONCE(dev && !dev->coherent_dma_mask);

        if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
                return cpu_addr;

        /* let the implementation decide on the zone to allocate from: */
        flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

        if (!arch_dma_alloc_attrs(&dev))
                return NULL;

        if (dma_is_direct(ops))
                cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
        else if (ops->alloc)
                cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
        else
                return NULL;

        debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
        return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);

void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_handle, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
                return;
        /*
         * On non-coherent platforms which implement DMA-coherent buffers via
         * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
         * this far in IRQ context is a) at risk of a BUG_ON() or trying to
         * sleep on some machines, and b) an indication that the driver is
         * probably misusing the coherent API anyway.
         */
        WARN_ON(irqs_disabled());

        if (!cpu_addr)
                return;

        debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
        if (dma_is_direct(ops))
                dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
        else if (ops->free)
                ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);

static inline void dma_check_mask(struct device *dev, u64 mask)
{
        if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
                dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
}

int dma_supported(struct device *dev, u64 mask)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_is_direct(ops))
                return dma_direct_supported(dev, mask);
        if (!ops->dma_supported)
                return 1;
        return ops->dma_supported(dev, mask);
}
EXPORT_SYMBOL(dma_supported);

#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
#define arch_dma_set_mask(dev, mask)    do { } while (0)
#endif

int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        arch_dma_set_mask(dev, mask);
        dma_check_mask(dev, mask);
        *dev->dma_mask = mask;
        return 0;
}
EXPORT_SYMBOL(dma_set_mask);

#ifndef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
        if (!dma_supported(dev, mask))
                return -EIO;

        dma_check_mask(dev, mask);
        dev->coherent_dma_mask = mask;
        return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
#endif

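/*
 * Illustrative usage sketch, not part of the original file: drivers
 * normally set both masks from probe(), before any allocation or mapping,
 * and fall back to a narrower mask if the wider one is rejected:
 *
 *      if (dma_set_mask(dev, DMA_BIT_MASK(64)) ||
 *          dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) {
 *              if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
 *                  dma_set_coherent_mask(dev, DMA_BIT_MASK(32)))
 *                      return -ENODEV;
 *      }
 */
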
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));

        if (dma_is_direct(ops))
                arch_dma_cache_sync(dev, vaddr, size, dir);
        else if (ops->cache_sync)
                ops->cache_sync(dev, vaddr, size, dir);
}
EXPORT_SYMBOL(dma_cache_sync);

size_t dma_max_mapping_size(struct device *dev)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        size_t size = SIZE_MAX;

        if (dma_is_direct(ops))
                size = dma_direct_max_mapping_size(dev);
        else if (ops && ops->max_mapping_size)
                size = ops->max_mapping_size(dev);

        return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);