// SPDX-License-Identifier: GPL-2.0-only
/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 * 08/12/11 beckyb	Add highmem support
 */

#define pr_fmt(fmt) "software IO TLB: " fmt

#include <linux/cache.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/mem_encrypt.h>
#include <linux/set_memory.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif
#ifdef CONFIG_DMA_RESTRICTED_POOL
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
#include <linux/slab.h>
#endif

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/iommu-helper.h>

#define CREATE_TRACE_POINTS
#include <trace/events/swiotlb.h>

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
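
/*
 * Illustrative arithmetic (not part of the original source): with the usual
 * IO_TLB_SHIFT of 11 from <linux/swiotlb.h>, each slot ("slab") is 2 KiB, so
 * the 1 MiB floor above works out to (1 << 20) >> 11 = 512 slots.
 */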

#define INVALID_PHYS_ADDR (~(phys_addr_t)0)

enum swiotlb_force swiotlb_force;

struct io_tlb_mem io_tlb_default_mem;

/*
 * Max segment that we can provide which (if pages are contiguous) will
 * not be bounced (unless SWIOTLB_FORCE is set).
 */
static unsigned int max_segment;

static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;

static int __init
setup_io_tlb_npages(char *str)
{
	if (isdigit(*str)) {
		/* avoid tail segment of size < IO_TLB_SEGSIZE */
		default_nslabs =
			ALIGN(simple_strtoul(str, &str, 0), IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (!strcmp(str, "force"))
		swiotlb_force = SWIOTLB_FORCE;
	else if (!strcmp(str, "noforce"))
		swiotlb_force = SWIOTLB_NO_FORCE;

	return 0;
}
early_param("swiotlb", setup_io_tlb_npages);
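
/*
 * Usage sketch (illustrative, not from the original source): the parser above
 * accepts a slab count, optionally followed by a keyword, on the kernel
 * command line, e.g. "swiotlb=65536" or "swiotlb=65536,force"; the count is
 * rounded up to a multiple of IO_TLB_SEGSIZE.
 */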

unsigned int swiotlb_max_segment(void)
{
	return io_tlb_default_mem.nslabs ? max_segment : 0;
}
EXPORT_SYMBOL_GPL(swiotlb_max_segment);

void swiotlb_set_max_segment(unsigned int val)
{
	if (swiotlb_force == SWIOTLB_FORCE)
		max_segment = 1;
	else
		max_segment = rounddown(val, PAGE_SIZE);
}

unsigned long swiotlb_size_or_default(void)
{
	return default_nslabs << IO_TLB_SHIFT;
}

void __init swiotlb_adjust_size(unsigned long size)
{
	/*
	 * If swiotlb parameter has not been specified, give a chance to
	 * architectures such as those supporting memory encryption to
	 * adjust/expand SWIOTLB size for their use.
	 */
	if (default_nslabs != IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT)
		return;
	size = ALIGN(size, IO_TLB_SIZE);
	default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
	pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20);
}

void swiotlb_print_info(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;

	if (!mem->nslabs) {
		pr_warn("No low mem\n");
		return;
	}

	pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end,
		(mem->nslabs << IO_TLB_SHIFT) >> 20);
}

static inline unsigned long io_tlb_offset(unsigned long val)
{
	return val & (IO_TLB_SEGSIZE - 1);
}

static inline unsigned long nr_slots(u64 val)
{
	return DIV_ROUND_UP(val, IO_TLB_SIZE);
}
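
/*
 * Worked examples (illustrative, assuming IO_TLB_SIZE == 2048 and
 * IO_TLB_SEGSIZE == 128 from <linux/swiotlb.h>): nr_slots(5000) is
 * DIV_ROUND_UP(5000, 2048) = 3 slots, and io_tlb_offset(130) is
 * 130 & 127 = 2, i.e. slot 130 is the third slot of its segment.
 */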

/*
 * Early SWIOTLB allocation may be too early to allow an architecture to
 * perform the desired operations.  This function allows the architecture to
 * call SWIOTLB when the operations are possible.  It needs to be called
 * before the SWIOTLB memory is used.
 */
void __init swiotlb_update_mem_attributes(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	void *vaddr;
	unsigned long bytes;

	if (!mem->nslabs || mem->late_alloc)
		return;
	vaddr = phys_to_virt(mem->start);
	bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);
	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
	memset(vaddr, 0, bytes);
}

static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
				    unsigned long nslabs, bool late_alloc)
{
	void *vaddr = phys_to_virt(start);
	unsigned long bytes = nslabs << IO_TLB_SHIFT, i;

	mem->nslabs = nslabs;
	mem->start = start;
	mem->end = mem->start + bytes;
	mem->index = 0;
	mem->late_alloc = late_alloc;

	if (swiotlb_force == SWIOTLB_FORCE)
		mem->force_bounce = true;

	spin_lock_init(&mem->lock);
	for (i = 0; i < mem->nslabs; i++) {
		mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
		mem->slots[i].alloc_size = 0;
	}
	memset(vaddr, 0, bytes);
}
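
/*
 * Free-list shape after the loop above (illustrative, assuming
 * IO_TLB_SEGSIZE == 128): within each 128-slot segment the list values run
 * 128, 127, ..., 1, so slots[i].list is the number of free slots available
 * starting at slot i without crossing a segment boundary.
 */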

int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	size_t alloc_size;

	if (swiotlb_force == SWIOTLB_NO_FORCE)
		return 0;

	/* protect against double initialization */
	if (WARN_ON_ONCE(mem->nslabs))
		return -ENOMEM;

	alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
	mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
	if (!mem->slots)
		panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
		      __func__, alloc_size, PAGE_SIZE);

	swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false);

	if (verbose)
		swiotlb_print_info();
	swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT);
	return 0;
}

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init
swiotlb_init(int verbose)
{
	size_t bytes = PAGE_ALIGN(default_nslabs << IO_TLB_SHIFT);
	void *tlb;

	if (swiotlb_force == SWIOTLB_NO_FORCE)
		return;

	/* Get IO TLB memory from the low pages */
	tlb = memblock_alloc_low(bytes, PAGE_SIZE);
	if (!tlb)
		goto fail;
	if (swiotlb_init_with_tbl(tlb, default_nslabs, verbose))
		goto fail_free_mem;
	return;

fail_free_mem:
	memblock_free_early(__pa(tlb), bytes);
fail:
	pr_warn("Cannot allocate buffer");
}

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
	unsigned long nslabs =
		ALIGN(default_size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
	unsigned long bytes;
	unsigned char *vstart = NULL;
	unsigned int order;
	int rc = 0;

	if (swiotlb_force == SWIOTLB_NO_FORCE)
		return 0;

	/*
	 * Get IO TLB memory from the low pages
	 */
	order = get_order(nslabs << IO_TLB_SHIFT);
	nslabs = SLABS_PER_PAGE << order;
	bytes = nslabs << IO_TLB_SHIFT;

	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
						  order);
		if (vstart)
			break;
		order--;
	}

	if (!vstart)
		return -ENOMEM;

	if (order != get_order(bytes)) {
		pr_warn("only able to allocate %ld MB\n",
			(PAGE_SIZE << order) >> 20);
		nslabs = SLABS_PER_PAGE << order;
	}
	rc = swiotlb_late_init_with_tbl(vstart, nslabs);
	if (rc)
		free_pages((unsigned long)vstart, order);

	return rc;
}

int
swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	unsigned long bytes = nslabs << IO_TLB_SHIFT;

	if (swiotlb_force == SWIOTLB_NO_FORCE)
		return 0;

	/* protect against double initialization */
	if (WARN_ON_ONCE(mem->nslabs))
		return -ENOMEM;

	mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
		get_order(array_size(sizeof(*mem->slots), nslabs)));
	if (!mem->slots)
		return -ENOMEM;

	set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
	swiotlb_init_io_tlb_mem(mem, virt_to_phys(tlb), nslabs, true);

	swiotlb_print_info();
	swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT);
	return 0;
}

void __init swiotlb_exit(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	unsigned long tbl_vaddr;
	size_t tbl_size, slots_size;

	if (swiotlb_force == SWIOTLB_FORCE)
		return;

	if (!mem->nslabs)
		return;

	pr_info("tearing down default memory pool\n");
	tbl_vaddr = (unsigned long)phys_to_virt(mem->start);
	tbl_size = PAGE_ALIGN(mem->end - mem->start);
	slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs));

	set_memory_encrypted(tbl_vaddr, tbl_size >> PAGE_SHIFT);
	if (mem->late_alloc) {
		free_pages(tbl_vaddr, get_order(tbl_size));
		free_pages((unsigned long)mem->slots, get_order(slots_size));
	} else {
		memblock_free_late(mem->start, tbl_size);
		memblock_free_late(__pa(mem->slots), slots_size);
	}

	memset(mem, 0, sizeof(*mem));
}

/*
 * Return the offset into an iotlb slot required to keep the device happy.
 */
static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
{
	return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
}
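
/*
 * Worked example (illustrative, assuming IO_TLB_SIZE == 2048): a device whose
 * dma_get_min_align_mask() is 0xfff (4 KiB alignment granule) and a buffer at
 * physical address 0x12345a10 give 0xa10 & 0x7ff = 0x210, so the bounce
 * buffer must start 0x210 bytes into its first 2 KiB slot to keep the low
 * address bits the device cares about unchanged.
 */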

/*
 * Bounce: copy the swiotlb buffer from or back to the original dma location
 */
static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size,
			   enum dma_data_direction dir)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT;
	phys_addr_t orig_addr = mem->slots[index].orig_addr;
	size_t alloc_size = mem->slots[index].alloc_size;
	unsigned long pfn = PFN_DOWN(orig_addr);
	unsigned char *vaddr = phys_to_virt(tlb_addr);
	unsigned int tlb_offset, orig_addr_offset;

	if (orig_addr == INVALID_PHYS_ADDR)
		return;

	tlb_offset = tlb_addr & (IO_TLB_SIZE - 1);
	orig_addr_offset = swiotlb_align_offset(dev, orig_addr);
	if (tlb_offset < orig_addr_offset) {
		dev_WARN_ONCE(dev, 1,
			"Access before mapping start detected. orig offset %u, requested offset %u.\n",
			orig_addr_offset, tlb_offset);
		return;
	}

	tlb_offset -= orig_addr_offset;
	if (tlb_offset > alloc_size) {
		dev_WARN_ONCE(dev, 1,
			"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu+%u.\n",
			alloc_size, size, tlb_offset);
		return;
	}

	orig_addr += tlb_offset;
	alloc_size -= tlb_offset;

	if (size > alloc_size) {
		dev_WARN_ONCE(dev, 1,
			"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n",
			alloc_size, size);
		size = alloc_size;
	}

	if (PageHighMem(pfn_to_page(pfn))) {
		/* The buffer does not have a mapping.  Map it in and copy */
		unsigned int offset = orig_addr & ~PAGE_MASK;
		char *buffer;
		unsigned int sz = 0;
		unsigned long flags;

		while (size) {
			sz = min_t(size_t, PAGE_SIZE - offset, size);

			local_irq_save(flags);
			buffer = kmap_atomic(pfn_to_page(pfn));
			if (dir == DMA_TO_DEVICE)
				memcpy(vaddr, buffer + offset, sz);
			else
				memcpy(buffer + offset, vaddr, sz);
			kunmap_atomic(buffer);
			local_irq_restore(flags);

			size -= sz;
			pfn++;
			vaddr += sz;
			offset = 0;
		}
	} else if (dir == DMA_TO_DEVICE) {
		memcpy(vaddr, phys_to_virt(orig_addr), size);
	} else {
		memcpy(phys_to_virt(orig_addr), vaddr, size);
	}
}

#define slot_addr(start, idx)	((start) + ((idx) << IO_TLB_SHIFT))

/*
 * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
 */
static inline unsigned long get_max_slots(unsigned long boundary_mask)
{
	if (boundary_mask == ~0UL)
		return 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
	return nr_slots(boundary_mask + 1);
}

static unsigned int wrap_index(struct io_tlb_mem *mem, unsigned int index)
{
	if (index >= mem->nslabs)
		return 0;
	return index;
}
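
/*
 * Worked example (illustrative, assuming IO_TLB_SIZE == 2048): a device with
 * a 64 KiB segment boundary has boundary_mask == 0xffff, so get_max_slots()
 * returns nr_slots(0x10000) = 32, i.e. an allocation may span at most 32
 * contiguous 2 KiB slots without crossing that boundary.
 */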

/*
 * Find a suitable number of IO TLB entries that will fit this request and
 * allocate a buffer from that IO TLB pool.
 */
static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
			      size_t alloc_size)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	unsigned long boundary_mask = dma_get_seg_boundary(dev);
	dma_addr_t tbl_dma_addr =
		phys_to_dma_unencrypted(dev, mem->start) & boundary_mask;
	unsigned long max_slots = get_max_slots(boundary_mask);
	unsigned int iotlb_align_mask =
		dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1);
	unsigned int nslots = nr_slots(alloc_size), stride;
	unsigned int index, wrap, count = 0, i;
	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
	unsigned long flags;

	BUG_ON(!nslots);

	/*
	 * For mappings with an alignment requirement don't bother looping to
	 * unaligned slots once we found an aligned one.  For allocations of
	 * PAGE_SIZE or larger only look for page aligned allocations.
	 */
	stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
	if (alloc_size >= PAGE_SIZE)
		stride = max(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT));

	spin_lock_irqsave(&mem->lock, flags);
	if (unlikely(nslots > mem->nslabs - mem->used))
		goto not_found;

	index = wrap = wrap_index(mem, ALIGN(mem->index, stride));
	do {
		if (orig_addr &&
		    (slot_addr(tbl_dma_addr, index) & iotlb_align_mask) !=
			    (orig_addr & iotlb_align_mask)) {
			index = wrap_index(mem, index + 1);
			continue;
		}

		/*
		 * If we find a slot that indicates we have 'nslots' number of
		 * contiguous buffers, we allocate the buffers from that slot
		 * and mark the entries as '0' indicating unavailable.
		 */
		if (!iommu_is_span_boundary(index, nslots,
					    nr_slots(tbl_dma_addr),
					    max_slots)) {
			if (mem->slots[index].list >= nslots)
				goto found;
		}
		index = wrap_index(mem, index + stride);
	} while (index != wrap);

not_found:
	spin_unlock_irqrestore(&mem->lock, flags);
	return -1;

found:
	for (i = index; i < index + nslots; i++) {
		mem->slots[i].list = 0;
		mem->slots[i].alloc_size =
			alloc_size - (offset + ((i - index) << IO_TLB_SHIFT));
	}
	for (i = index - 1;
	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 &&
	     mem->slots[i].list; i--)
		mem->slots[i].list = ++count;

	/*
	 * Update the indices to avoid searching in the next round.
	 */
	if (index + nslots < mem->nslabs)
		mem->index = index + nslots;
	else
		mem->index = 0;
	mem->used += nslots;

	spin_unlock_irqrestore(&mem->lock, flags);
	return index;
}
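
/*
 * Worked example for the stride computation above (illustrative, assuming
 * IO_TLB_SHIFT == 11 and PAGE_SHIFT == 12): a device with no min_align_mask
 * gets stride 1; a 4 KiB-or-larger allocation doubles that to 2 so the search
 * only visits page-aligned slots; a device with min_align_mask 0xfff gets
 * iotlb_align_mask 0x800 and therefore stride 2 as well, keeping bit 11 of
 * the bounce address equal to bit 11 of the original address.
 */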

phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
		size_t mapping_size, size_t alloc_size,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
	unsigned int i;
	int index;
	phys_addr_t tlb_addr;

	if (!mem)
		panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");

	if (mem_encrypt_active())
		pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");

	if (mapping_size > alloc_size) {
		dev_warn_once(dev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
			      mapping_size, alloc_size);
		return (phys_addr_t)DMA_MAPPING_ERROR;
	}

	index = swiotlb_find_slots(dev, orig_addr, alloc_size + offset);
	if (index == -1) {
		if (!(attrs & DMA_ATTR_NO_WARN))
			dev_warn_ratelimited(dev,
	"swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
				 alloc_size, mem->nslabs, mem->used);
		return (phys_addr_t)DMA_MAPPING_ERROR;
	}

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	for (i = 0; i < nr_slots(alloc_size + offset); i++)
		mem->slots[index + i].orig_addr = slot_addr(orig_addr, i);
	tlb_addr = slot_addr(mem->start, index) + offset;
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    (!(attrs & DMA_ATTR_OVERWRITE) || dir == DMA_TO_DEVICE ||
	    dir == DMA_BIDIRECTIONAL))
		swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
	return tlb_addr;
}
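
/*
 * Call-sequence sketch (illustrative only; the real callers live in the
 * dma-direct and iommu mapping paths): a typical mapping path does
 *
 *	tlb_addr = swiotlb_tbl_map_single(dev, phys, size, size, dir, attrs);
 *	if (tlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
 *		return DMA_MAPPING_ERROR;
 *	...
 *	swiotlb_tbl_unmap_single(dev, tlb_addr, size, dir, attrs);
 *
 * with the unmap call bouncing data back for DMA_FROM_DEVICE and
 * DMA_BIDIRECTIONAL transfers before the slots are released.
 */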

static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	unsigned long flags;
	unsigned int offset = swiotlb_align_offset(dev, tlb_addr);
	int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
	int nslots = nr_slots(mem->slots[index].alloc_size + offset);
	int count, i;

	/*
	 * Return the buffer to the free list by setting the corresponding
	 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the pool being returned.
	 */
	spin_lock_irqsave(&mem->lock, flags);
	if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE))
		count = mem->slots[index + nslots].list;
	else
		count = 0;

	/*
	 * Step 1: return the slots to the free list, merging the slots with
	 * succeeding slots
	 */
	for (i = index + nslots - 1; i >= index; i--) {
		mem->slots[i].list = ++count;
		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
		mem->slots[i].alloc_size = 0;
	}

	/*
	 * Step 2: merge the returned slots with the preceding slots, if
	 * available (non zero)
	 */
	for (i = index - 1;
	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list;
	     i--)
		mem->slots[i].list = ++count;
	mem->used -= nslots;
	spin_unlock_irqrestore(&mem->lock, flags);
}

/*
 * tlb_addr is the physical address of the bounce buffer to unmap.
 */
void swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr,
			      size_t mapping_size, enum dma_data_direction dir,
			      unsigned long attrs)
{
	/*
	 * First, sync the memory before unmapping the entry
	 */
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
		swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_FROM_DEVICE);

	swiotlb_release_slots(dev, tlb_addr);
}

void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir)
{
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE);
	else
		BUG_ON(dir != DMA_FROM_DEVICE);
}

void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir)
{
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE);
	else
		BUG_ON(dir != DMA_TO_DEVICE);
}

/*
 * Create a swiotlb mapping for the buffer at @paddr, and in case of DMAing
 * to the device copy the data into it as well.
 */
dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t swiotlb_addr;
	dma_addr_t dma_addr;

	trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size,
			      swiotlb_force);

	swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, dir,
			attrs);
	if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	/* Ensure that the address returned is DMA'ble */
	dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr);
	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
		swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, dir,
			attrs | DMA_ATTR_SKIP_CPU_SYNC);
		dev_WARN_ONCE(dev, 1,
			"swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			&dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(swiotlb_addr, size, dir);
	return dma_addr;
}

size_t swiotlb_max_mapping_size(struct device *dev)
{
	return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE;
}
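
/*
 * Illustrative arithmetic (assuming IO_TLB_SIZE == 2048 and
 * IO_TLB_SEGSIZE == 128): the largest mapping the bounce buffer can service
 * is 2048 * 128 = 256 KiB, one full slot segment.
 */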

bool is_swiotlb_active(struct device *dev)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

	return mem && mem->nslabs;
}
EXPORT_SYMBOL_GPL(is_swiotlb_active);

#ifdef CONFIG_DEBUG_FS
static struct dentry *debugfs_dir;

static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem)
{
	debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs);
	debugfs_create_ulong("io_tlb_used", 0400, mem->debugfs, &mem->used);
}

static int __init swiotlb_create_default_debugfs(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;

	debugfs_dir = debugfs_create_dir("swiotlb", NULL);
	if (mem->nslabs) {
		mem->debugfs = debugfs_dir;
		swiotlb_create_debugfs_files(mem);
	}
	return 0;
}

late_initcall(swiotlb_create_default_debugfs);

#endif

#ifdef CONFIG_DMA_RESTRICTED_POOL

#ifdef CONFIG_DEBUG_FS
static void rmem_swiotlb_debugfs_init(struct reserved_mem *rmem)
{
	struct io_tlb_mem *mem = rmem->priv;

	mem->debugfs = debugfs_create_dir(rmem->name, debugfs_dir);
	swiotlb_create_debugfs_files(mem);
}
#else
static void rmem_swiotlb_debugfs_init(struct reserved_mem *rmem)
{
}
#endif

struct page *swiotlb_alloc(struct device *dev, size_t size)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	phys_addr_t tlb_addr;
	int index;

	if (!mem)
		return NULL;

	index = swiotlb_find_slots(dev, 0, size);
	if (index == -1)
		return NULL;

	tlb_addr = slot_addr(mem->start, index);

	return pfn_to_page(PFN_DOWN(tlb_addr));
}

bool swiotlb_free(struct device *dev, struct page *page, size_t size)
{
	phys_addr_t tlb_addr = page_to_phys(page);

	if (!is_swiotlb_buffer(dev, tlb_addr))
		return false;

	swiotlb_release_slots(dev, tlb_addr);

	return true;
}

static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
				    struct device *dev)
{
	struct io_tlb_mem *mem = rmem->priv;
	unsigned long nslabs = rmem->size >> IO_TLB_SHIFT;

	/*
	 * Since multiple devices can share the same pool, the private data,
	 * io_tlb_mem struct, will be initialized by the first device attached
	 * to it.
	 */
	if (!mem) {
		mem = kzalloc(sizeof(*mem), GFP_KERNEL);
		if (!mem)
			return -ENOMEM;

		mem->slots = kzalloc(array_size(sizeof(*mem->slots), nslabs),
				     GFP_KERNEL);
		if (!mem->slots) {
			kfree(mem);
			return -ENOMEM;
		}

		set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
				     rmem->size >> PAGE_SHIFT);
		swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, false);
		mem->force_bounce = true;
		mem->for_alloc = true;

		rmem->priv = mem;

		rmem_swiotlb_debugfs_init(rmem);
	}

	dev->dma_io_tlb_mem = mem;

	return 0;
}

static void rmem_swiotlb_device_release(struct reserved_mem *rmem,
					struct device *dev)
{
	dev->dma_io_tlb_mem = &io_tlb_default_mem;
}

static const struct reserved_mem_ops rmem_swiotlb_ops = {
	.device_init = rmem_swiotlb_device_init,
	.device_release = rmem_swiotlb_device_release,
};

static int __init rmem_swiotlb_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL) ||
	    of_get_flat_dt_prop(node, "linux,cma-default", NULL) ||
	    of_get_flat_dt_prop(node, "linux,dma-default", NULL) ||
	    of_get_flat_dt_prop(node, "no-map", NULL))
		return -EINVAL;

	if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
		pr_err("Restricted DMA pool must be accessible within the linear mapping.");
		return -EINVAL;
	}

	rmem->ops = &rmem_swiotlb_ops;
	pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}
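
/*
 * Device-tree sketch (illustrative, not from this file): a restricted DMA
 * pool is declared as a reserved-memory node and referenced by the device
 * that should bounce through it, e.g.
 *
 *	reserved-memory {
 *		restricted_dma: restricted-dma@50000000 {
 *			compatible = "restricted-dma-pool";
 *			reg = <0x50000000 0x400000>;
 *		};
 *	};
 *
 *	some-device {
 *		memory-region = <&restricted_dma>;
 *	};
 *
 * The node names and addresses above are made up; only the
 * "restricted-dma-pool" compatible string and the memory-region linkage are
 * what this code keys on.
 */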

RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);
#endif /* CONFIG_DMA_RESTRICTED_POOL */