// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/pfn.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include "direct.h"
/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
 * it for entirely different regions. In that case the arch code needs to
 * override the variable below for dma-direct to work properly.
 */
unsigned int zone_dma_bits __ro_after_init = 24;
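/*
 * Illustrative only (not taken from any particular architecture): an arch
 * whose DMA zone covers the low 4 GiB instead of the ISA 16 MiB window
 * would set, early in boot:
 *
 *	zone_dma_bits = 32;
 *
 * so that the GFP_DMA / GFP_DMA32 selection and the dma_direct_supported()
 * check below reason about the right ZONE_DMA boundary.
 */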
static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{
	if (force_dma_unencrypted(dev))
		return phys_to_dma_unencrypted(dev, phys);
	return phys_to_dma(dev, phys);
}
static inline struct page *dma_direct_to_page(struct device *dev,
		dma_addr_t dma_addr)
{
	return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
}
u64 dma_direct_get_required_mask(struct device *dev)
{
	phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
	u64 max_dma = phys_to_dma_direct(dev, phys);

	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}
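/*
 * Worked example for the expression above, assuming (for illustration) that
 * the last RAM page maps to a DMA address around 32 GiB, i.e.
 * max_dma = 0x800000000:
 *
 *	fls64(0x800000000)		= 36
 *	(1ULL << (36 - 1)) * 2 - 1	= 0xfffffffff
 *
 * i.e. the required mask is rounded up to the next power of two minus one,
 * here a 36-bit mask.
 */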
static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
					 u64 *phys_limit)
{
	u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit);

	/*
	 * Optimistically try the zone that the physical address mask falls
	 * into first.  If that returns memory that isn't actually addressable
	 * we will fallback to the next lower zone and try again.
	 *
	 * Note that GFP_DMA32 and GFP_DMA are no ops without the corresponding
	 * zones.
	 */
	*phys_limit = dma_to_phys(dev, dma_limit);
	if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
		return GFP_DMA;
	if (*phys_limit <= DMA_BIT_MASK(32))
		return GFP_DMA32;
	return 0;
}
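/*
 * E.g. a device with a 30-bit coherent mask and no bus_dma_limit, assuming a
 * direct phys/DMA translation, gets *phys_limit = 0x3fffffff: above
 * DMA_BIT_MASK(24) (the default zone_dma_bits) but within DMA_BIT_MASK(32),
 * so the first allocation attempt is made with GFP_DMA32.
 */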
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);

	if (dma_addr == DMA_MAPPING_ERROR)
		return false;
	return dma_addr + size - 1 <=
		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}
static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		gfp_t gfp)
{
	int node = dev_to_node(dev);
	struct page *page = NULL;
	u64 phys_limit;

	WARN_ON_ONCE(!PAGE_ALIGNED(size));

	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
					   &phys_limit);
	page = dma_alloc_contiguous(dev, size, gfp);
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;
	}
again:
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(size));
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    phys_limit < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	return page;
}
static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;
	u64 phys_mask;
	void *ret;

	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
					   &phys_mask);
	page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
	if (!page)
		return NULL;
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return ret;
}
void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct page *page;
	void *ret;
	int err;

	size = PAGE_ALIGN(size);
	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev)) {
		page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
		if (!page)
			return NULL;
		/* remove any dirty cache lines on the kernel alias */
		if (!PageHighMem(page))
			arch_dma_prep_coherent(page, size);
		*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
		/* return the page pointer as the opaque cookie */
		return page;
	}

	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !dev_is_dma_coherent(dev))
		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);

	/*
	 * Remapping or decrypting memory may block. If either is required and
	 * we can't block, allocate the memory from the atomic pools.
	 */
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    !gfpflags_allow_blocking(gfp) &&
	    (force_dma_unencrypted(dev) ||
	     (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev))))
		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

	/* we always manually zero the memory once we are done */
	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
	if (!page)
		return NULL;

	if ((IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	     !dev_is_dma_coherent(dev)) ||
	    (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
		/* remove any dirty cache lines on the kernel alias */
		arch_dma_prep_coherent(page, size);

		/* create a coherent mapping */
		ret = dma_common_contiguous_remap(page, size,
				dma_pgprot(dev, PAGE_KERNEL, attrs),
				__builtin_return_address(0));
		if (!ret)
			goto out_free_pages;
		if (force_dma_unencrypted(dev)) {
			err = set_memory_decrypted((unsigned long)ret,
						   1 << get_order(size));
			if (err)
				goto out_free_pages;
		}
		memset(ret, 0, size);
		goto done;
	}

	if (PageHighMem(page)) {
		/*
		 * Depending on the cma= arguments and per-arch setup
		 * dma_alloc_contiguous could return highmem pages.
		 * Without remapping there is no way to return them here,
		 * so log an error and fail.
		 */
		dev_info(dev, "Rejecting highmem page from CMA.\n");
		goto out_free_pages;
	}

	ret = page_address(page);
	if (force_dma_unencrypted(dev)) {
		err = set_memory_decrypted((unsigned long)ret,
					   1 << get_order(size));
		if (err)
			goto out_free_pages;
	}

	memset(ret, 0, size);

	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
	    !dev_is_dma_coherent(dev)) {
		arch_dma_prep_coherent(page, size);
		ret = arch_dma_set_uncached(ret, size);
		if (IS_ERR(ret))
			goto out_encrypt_pages;
	}
done:
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return ret;

out_encrypt_pages:
	if (force_dma_unencrypted(dev)) {
		err = set_memory_encrypted((unsigned long)page_address(page),
					   1 << get_order(size));
		/* If memory cannot be re-encrypted, it must be leaked */
		if (err)
			return NULL;
	}
out_free_pages:
	dma_free_contiguous(dev, page, size);
	return NULL;
}
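/*
 * Concrete case for the atomic pool rule above: a non-coherent device on a
 * CONFIG_DMA_DIRECT_REMAP kernel calling dma_alloc_coherent() with GFP_ATOMIC
 * cannot take the remap path (dma_common_contiguous_remap() may sleep), so
 * the buffer comes from the pre-populated atomic pools via
 * dma_direct_alloc_from_pool() instead.
 */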
void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int page_order = get_order(size);

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev)) {
		/* cpu_addr is a struct page cookie, not a kernel address */
		dma_free_contiguous(dev, cpu_addr, size);
		return;
	}

	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !dev_is_dma_coherent(dev)) {
		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
		return;
	}

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
		return;

	if (force_dma_unencrypted(dev))
		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
		vunmap(cpu_addr);
	else if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
		arch_dma_clear_uncached(cpu_addr, size);

	dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size);
}
struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page;
	void *ret;

	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp))
		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

	page = __dma_direct_alloc_pages(dev, size, gfp);
	if (!page)
		return NULL;
	if (PageHighMem(page)) {
		/*
		 * Depending on the cma= arguments and per-arch setup
		 * dma_alloc_contiguous could return highmem pages.
		 * Without remapping there is no way to return them here,
		 * so log an error and fail.
		 */
		dev_info(dev, "Rejecting highmem page from CMA.\n");
		goto out_free_pages;
	}

	ret = page_address(page);
	if (force_dma_unencrypted(dev)) {
		if (set_memory_decrypted((unsigned long)ret,
				1 << get_order(size)))
			goto out_free_pages;
	}
	memset(ret, 0, size);
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return page;
out_free_pages:
	dma_free_contiguous(dev, page, size);
	return NULL;
}
void dma_direct_free_pages(struct device *dev, size_t size,
		struct page *page, dma_addr_t dma_addr,
		enum dma_data_direction dir)
{
	unsigned int page_order = get_order(size);
	void *vaddr = page_address(page);

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    dma_free_from_pool(dev, vaddr, size))
		return;

	if (force_dma_unencrypted(dev))
		set_memory_encrypted((unsigned long)vaddr, 1 << page_order);

	dma_free_contiguous(dev, page, size);
}
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (unlikely(is_swiotlb_buffer(paddr)))
			swiotlb_sync_single_for_device(dev, paddr, sg->length,
						       dir);

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(paddr, sg->length,
					dir);
	}
}
#endif
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(paddr, sg->length, dir);

		if (unlikely(is_swiotlb_buffer(paddr)))
			swiotlb_sync_single_for_cpu(dev, paddr, sg->length,
						    dir);

		if (dir == DMA_FROM_DEVICE)
			arch_dma_mark_clean(paddr, sg->length);
	}

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu_all();
}
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir,
			     attrs);
}
#endif
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}

	return nents;

out_unmap:
	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return 0;
}
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t dma_addr = paddr;

	if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
		dev_err_once(dev,
			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		WARN_ON_ONCE(1);
		return DMA_MAPPING_ERROR;
	}

	return dma_addr;
}
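/*
 * Example of the check above: with a 32-bit dma_mask and no bus_dma_limit,
 * mapping an MMIO resource at paddr = 0xfff00000 with size = 0x200000 fails
 * dma_capable() (0xfff00000 + 0x200000 - 1 exceeds 0xffffffff), so the
 * caller gets DMA_MAPPING_ERROR instead of a silently truncated address.
 */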
int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page = dma_direct_to_page(dev, dma_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}
bool dma_direct_can_mmap(struct device *dev)
{
	return dev_is_dma_coherent(dev) ||
		IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP);
}
int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
}
int dma_direct_supported(struct device *dev, u64 mask)
{
	u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;

	/*
	 * Because 32-bit DMA masks are so common we expect every architecture
	 * to be able to satisfy them - either by not supporting more physical
	 * memory, or by providing a ZONE_DMA32.  If neither is the case, the
	 * architecture needs to use an IOMMU instead of the direct mapping.
	 */
	if (mask >= DMA_BIT_MASK(32))
		return 1;

	/*
	 * This check needs to be against the actual bit mask value, so use
	 * phys_to_dma_unencrypted() here so that the SME encryption mask isn't
	 * part of the check.
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
	return mask >= phys_to_dma_unencrypted(dev, min_mask);
}
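/*
 * Worked example for the narrow-mask path above, assuming 8 GiB of RAM and a
 * direct phys/DMA translation: min_mask starts out at about 0x1fffff000, so
 * a 30-bit mask (0x3fffffff) would be rejected; but with CONFIG_ZONE_DMA and
 * the default zone_dma_bits of 24, min_mask is clamped to 0xffffff and the
 * 30-bit mask is accepted, because such a device can always be served from
 * ZONE_DMA memory.
 */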
size_t dma_direct_max_mapping_size(struct device *dev)
{
	/* If SWIOTLB is active, use its maximum mapping size */
	if (is_swiotlb_active() &&
	    (dma_addressing_limited(dev) || swiotlb_force == SWIOTLB_FORCE))
		return swiotlb_max_mapping_size(dev);
	return SIZE_MAX;
}
bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return !dev_is_dma_coherent(dev) ||
		is_swiotlb_buffer(dma_to_phys(dev, dma_addr));
}
/**
 * dma_direct_set_offset - Assign scalar offset for a single DMA range.
 * @dev:	device pointer; needed to "own" the alloced memory.
 * @cpu_start:  beginning of memory region covered by this offset.
 * @dma_start:  beginning of DMA/PCI region covered by this offset.
 * @size:	size of the region.
 *
 * This is for the simple case of a uniform offset which cannot
 * be discovered by "dma-ranges".
 *
 * It returns -ENOMEM if out of memory, -EINVAL if a map
 * already exists, 0 otherwise.
 *
 * Note: any call to this from a driver is a bug. The mapping needs
 * to be described by the device tree or other firmware interfaces.
 */
int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
			 dma_addr_t dma_start, u64 size)
{
	struct bus_dma_region *map;
	u64 offset = (u64)cpu_start - (u64)dma_start;

	if (dev->dma_range_map) {
		dev_err(dev, "attempt to add DMA range to existing map\n");
		return -EINVAL;
	}

	if (!offset)
		return 0;

	map = kcalloc(2, sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;
	map[0].cpu_start = cpu_start;
	map[0].dma_start = dma_start;
	map[0].offset = offset;
	map[0].size = size;
	dev->dma_range_map = map;
	return 0;
}
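/*
 * Hypothetical example (platform/firmware setup code, never a driver): RAM
 * that lives at CPU address 0x80000000 is seen by the bus at address 0, so
 * the setup code would install a single region with
 *
 *	dma_direct_set_offset(dev, 0x80000000, 0x00000000, SZ_256M);
 *
 * after which phys_to_dma()/dma_to_phys() translate through the one
 * bus_dma_region entry allocated above.
 */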