// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <linux/hash.h>
#include <linux/fault-inject.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>
#include <asm/fadump.h>
#include <asm/vio.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

#define DBG(...)
#ifdef CONFIG_IOMMU_DEBUGFS
static int iommu_debugfs_weight_get(void *data, u64 *val)
{
	struct iommu_table *tbl = data;

	*val = bitmap_weight(tbl->it_map, tbl->it_size);
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(iommu_debugfs_fops_weight, iommu_debugfs_weight_get, NULL, "%llu\n");

static void iommu_debugfs_add(struct iommu_table *tbl)
{
	char name[10];
	struct dentry *liobn_entry;

	sprintf(name, "%08lx", tbl->it_index);
	liobn_entry = debugfs_create_dir(name, iommu_debugfs_dir);

	debugfs_create_file_unsafe("weight", 0400, liobn_entry, tbl, &iommu_debugfs_fops_weight);
	debugfs_create_ulong("it_size", 0400, liobn_entry, &tbl->it_size);
	debugfs_create_ulong("it_page_shift", 0400, liobn_entry, &tbl->it_page_shift);
	debugfs_create_ulong("it_reserved_start", 0400, liobn_entry, &tbl->it_reserved_start);
	debugfs_create_ulong("it_reserved_end", 0400, liobn_entry, &tbl->it_reserved_end);
	debugfs_create_ulong("it_indirect_levels", 0400, liobn_entry, &tbl->it_indirect_levels);
	debugfs_create_ulong("it_level_size", 0400, liobn_entry, &tbl->it_level_size);
}

static void iommu_debugfs_del(struct iommu_table *tbl)
{
	char name[10];
	struct dentry *liobn_entry;

	sprintf(name, "%08lx", tbl->it_index);
	liobn_entry = debugfs_lookup(name, iommu_debugfs_dir);
	debugfs_remove(liobn_entry);
}
#else
static void iommu_debugfs_add(struct iommu_table *tbl){}
static void iommu_debugfs_del(struct iommu_table *tbl){}
#endif
static int novmerge;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);

static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("iommu=", setup_iommu);
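
/*
 * Usage note (comment only, illustrative): virtual merging is enabled by
 * default and is toggled from the kernel command line, e.g.
 *
 *	iommu=novmerge		disable merging of adjacent scatterlist entries
 *	iommu=vmerge		re-enable it explicitly
 */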

static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);

/*
 * We precalculate the hash to avoid doing it on every allocation.
 *
 * The hash is important to spread CPUs across all the pools. For example,
 * on a POWER7 with 4 way SMT we want interrupts on the primary threads;
 * without the hash, with 4 pools all primary threads would map to the
 * same pool.
 */
static int __init setup_iommu_pool_hash(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);

	return 0;
}
subsys_initcall(setup_iommu_pool_hash);
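
/*
 * Illustrative sketch (comment only): at allocation time the precomputed
 * per-CPU hash selects a pool with a cheap mask, which is why the number
 * of pools must be a power of two:
 *
 *	pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);
 *
 * hash_32(i, IOMMU_POOL_HASHBITS) spreads consecutive CPU numbers across
 * the 2^IOMMU_POOL_HASHBITS pools instead of leaving all primary SMT
 * threads contending on the same pool.
 */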
#ifdef CONFIG_FAIL_IOMMU

static DECLARE_FAULT_ATTR(fail_iommu);

static int __init setup_fail_iommu(char *str)
{
	return setup_fault_attr(&fail_iommu, str);
}
__setup("fail_iommu=", setup_fail_iommu);

static bool should_fail_iommu(struct device *dev)
{
	return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
}

static int __init fail_iommu_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
						       NULL, &fail_iommu);

	return PTR_ERR_OR_ZERO(dir);
}
late_initcall(fail_iommu_debugfs);

static ssize_t fail_iommu_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
}

static ssize_t fail_iommu_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	int i;

	if (count > 0 && sscanf(buf, "%d", &i) > 0)
		dev->archdata.fail_iommu = (i == 0) ? 0 : 1;

	return count;
}

static DEVICE_ATTR_RW(fail_iommu);

static int fail_iommu_bus_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE) {
		if (device_create_file(dev, &dev_attr_fail_iommu))
			pr_warn("Unable to create IOMMU fault injection sysfs entries\n");
	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
		device_remove_file(dev, &dev_attr_fail_iommu);
	}

	return 0;
}

static struct notifier_block fail_iommu_bus_notifier = {
	.notifier_call = fail_iommu_bus_notify
};

static int __init fail_iommu_setup(void)
{
#ifdef CONFIG_PCI
	bus_register_notifier(&pci_bus_type, &fail_iommu_bus_notifier);
#endif
#ifdef CONFIG_IBMVIO
	bus_register_notifier(&vio_bus_type, &fail_iommu_bus_notifier);
#endif

	return 0;
}
/*
 * Must execute after the PCI and VIO subsystems have initialised but
 * before devices are probed.
 */
arch_initcall(fail_iommu_setup);
#else
static inline bool should_fail_iommu(struct device *dev)
{
	return false;
}
#endif
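
/*
 * Usage sketch (comment only, illustrative): fault injection follows the
 * generic fault-attr conventions from Documentation/fault-injection/ and
 * is additionally gated per device through sysfs:
 *
 *	fail_iommu=<interval>,<probability>,<space>,<times>	(boot parameter)
 *	echo 1 > /sys/bus/pci/devices/<BDF>/fail_iommu		(opt a device in)
 *
 * Both must be set before should_fail_iommu() starts failing allocations;
 * the exact sysfs path depends on the bus (PCI or VIO) the device sits on.
 */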
static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long flags;
	unsigned int pool_nr;
	struct iommu_pool *pool;

	align_mask = (1ull << align_order) - 1;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_MAPPING_ERROR;
	}

	if (should_fail_iommu(dev))
		return DMA_MAPPING_ERROR;

	/*
	 * We don't need to disable preemption here because any CPU can
	 * safely use any IOMMU pool.
	 */
	pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);

	if (largealloc)
		pool = &(tbl->large_pool);
	else
		pool = &(tbl->pools[pool_nr]);

	spin_lock_irqsave(&(pool->lock), flags);

again:
	if ((pass == 0) && handle && *handle &&
	    (*handle >= pool->start) && (*handle < pool->end))
		start = *handle;
	else
		start = pool->hint;

	limit = pool->end;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = pool->start;

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0 in pool 0.
		 */
		if ((start & mask) >= limit || pass > 0) {
			spin_unlock(&(pool->lock));
			pool = &(tbl->pools[0]);
			spin_lock(&(pool->lock));
			start = pool->start;
		} else {
			start &= mask;
		}
	}

	n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
			dma_get_seg_boundary_nr_pages(dev, tbl->it_page_shift),
			align_mask);
	if (n == -1) {
		if (likely(pass == 0)) {
			/* First try the pool from the start */
			pool->hint = pool->start;
			pass++;
			goto again;

		} else if (pass <= tbl->nr_pools) {
			/* Now try scanning all the other pools */
			spin_unlock(&(pool->lock));
			pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
			pool = &tbl->pools[pool_nr];
			spin_lock(&(pool->lock));
			pool->hint = pool->start;
			pass++;
			goto again;

		} else if (pass == tbl->nr_pools + 1) {
			/* Last resort: try largepool */
			spin_unlock(&pool->lock);
			pool = &tbl->large_pool;
			spin_lock(&pool->lock);
			pool->hint = pool->start;
			pass++;
			goto again;

		} else {
			/* Give up */
			spin_unlock_irqrestore(&(pool->lock), flags);
			return DMA_MAPPING_ERROR;
		}
	}

	end = n + npages;

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		pool->hint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		pool->hint = (end + tbl->it_blocksize - 1) &
			~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	spin_unlock_irqrestore(&(pool->lock), flags);

	return n;
}
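
/*
 * Illustrative sketch (comment only): the hint bump above rounds small
 * allocations up to the next it_blocksize boundary so that unrelated
 * small mappings do not interleave within a block. E.g. with
 * it_blocksize = 16 and an allocation ending at entry 21, the next
 * search starts at (21 + 15) & ~15 = 32.
 */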
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order,
			      unsigned long attrs)
{
	unsigned long entry;
	dma_addr_t ret = DMA_MAPPING_ERROR;
	int build_fail;

	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_MAPPING_ERROR))
		return DMA_MAPPING_ERROR;

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << tbl->it_page_shift;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	build_fail = tbl->it_ops->set(tbl, entry, npages,
				      (unsigned long)page &
				      IOMMU_PAGE_MASK(tbl), direction, attrs);

	/* tbl->it_ops->set() only returns non-zero for transient errors.
	 * Clean up the table bitmap in this case and return
	 * DMA_MAPPING_ERROR. For all other errors the functionality is
	 * not altered.
	 */
	if (unlikely(build_fail)) {
		__iommu_free(tbl, ret, npages);
		return DMA_MAPPING_ERROR;
	}

	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}
static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
			     unsigned int npages)
{
	unsigned long entry, free_entry;

	entry = dma_addr >> tbl->it_page_shift;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry    = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr = 0x%llx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable    = 0x%llx\n", (u64)tbl);
			printk(KERN_INFO "\tbus#     = 0x%llx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize     = 0x%llx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex    = 0x%llx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}

		return false;
	}

	return true;
}
static struct iommu_pool *get_pool(struct iommu_table *tbl,
				   unsigned long entry)
{
	struct iommu_pool *p;
	unsigned long largepool_start = tbl->large_pool.start;

	/* The large pool is the last pool at the top of the table */
	if (entry >= largepool_start) {
		p = &tbl->large_pool;
	} else {
		unsigned int pool_nr = entry / tbl->poolsize;

		BUG_ON(pool_nr > tbl->nr_pools);
		p = &tbl->pools[pool_nr];
	}

	return p;
}
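
/*
 * Illustrative sketch (comment only): the pools partition the bitmap by
 * index. With it_size = 65536 and 4 pools, poolsize is
 * (65536 * 3 / 4) / 4 = 12288, so entry 30000 falls in pool
 * 30000 / 12288 = 2, while any entry >= 49152 belongs to the large pool
 * occupying the top quarter of the table.
 */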
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;
	unsigned long flags;
	struct iommu_pool *pool;

	entry = dma_addr >> tbl->it_page_shift;
	free_entry = entry - tbl->it_offset;

	pool = get_pool(tbl, free_entry);

	if (!iommu_free_check(tbl, dma_addr, npages))
		return;

	tbl->it_ops->clear(tbl, entry, npages);

	spin_lock_irqsave(&(pool->lock), flags);
	bitmap_clear(tbl->it_map, free_entry, npages);
	spin_unlock_irqrestore(&(pool->lock), flags);
}
static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		       unsigned int npages)
{
	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);
}
int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		     struct scatterlist *sglist, int nelems,
		     unsigned long mask, enum dma_data_direction direction,
		     unsigned long attrs)
{
	dma_addr_t dma_next = 0, dma_addr;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i, build_fail = 0;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return -EINVAL;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
		align = 0;
		if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - tbl->it_page_shift;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> tbl->it_page_shift, align);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_MAPPING_ERROR)) {
			if (!(attrs & DMA_ATTR_NO_WARN) &&
			    printk_ratelimit())
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %lx npages %lu\n", tbl, vaddr,
					 npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << tbl->it_page_shift;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
		    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = tbl->it_ops->set(tbl, entry, npages,
					      vaddr & IOMMU_PAGE_MASK(tbl),
					      direction, attrs);
		if (unlikely(build_fail))
			goto failure;

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %u\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of ppc_iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_MAPPING_ERROR;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IOMMU_PAGE_SIZE(tbl));
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_MAPPING_ERROR;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	return -EIO;
}
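
/*
 * Usage note (comment only, a sketch of the call path): drivers do not
 * call ppc_iommu_map_sg() directly; it is reached through the arch
 * dma_map_ops (dma_iommu_map_sg() in arch/powerpc/kernel/dma-iommu.c)
 * when a driver calls dma_map_sg() on a device behind a TCE table.
 */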
void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
			int nelems, enum dma_data_direction direction,
			unsigned long attrs)
{
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length,
					 IOMMU_PAGE_SIZE(tbl));
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);
}
static void iommu_table_clear(struct iommu_table *tbl)
{
	/*
	 * In case of firmware assisted dump, the system goes through a clean
	 * reboot process at the time of system crash. Hence it's safe to
	 * clear the TCE entries if firmware assisted dump is active.
	 */
	if (!is_kdump_kernel() || is_fadump_active()) {
		/* Clear the table in case firmware left allocations in it */
		tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
		return;
	}

#ifdef CONFIG_CRASH_DUMP
	if (tbl->it_ops->get) {
		unsigned long index, tceval, tcecount = 0;

		/* Reserve the existing mappings left by the first kernel. */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}

		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; freeing ");
			printk(KERN_WARNING "%d entries for the kdump boot\n",
			       KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
			     index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#endif
}
static void iommu_table_reserve_pages(struct iommu_table *tbl,
		unsigned long res_start, unsigned long res_end)
{
	int i;

	WARN_ON_ONCE(res_end < res_start);
	/*
	 * Reserve page 0 so it will not be used for any mappings.
	 * This protects against buggy drivers that treat page 0 as
	 * invalid, which could otherwise crash the machine or even
	 * lose data.
	 */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);

	tbl->it_reserved_start = res_start;
	tbl->it_reserved_end = res_end;

	/* Skip marking bits when res_start..res_end is empty or lies
	 * entirely outside the table.
	 */
	if (res_start && res_end &&
			(tbl->it_offset + tbl->it_size < res_start ||
			 res_end < tbl->it_offset))
		return;

	for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
		set_bit(i - tbl->it_offset, tbl->it_map);
}
static void iommu_table_release_pages(struct iommu_table *tbl)
{
	int i;

	/*
	 * In case we have reserved the first bit, clear it here so we
	 * do not trip the bitmap_empty() warning in the callers.
	 */
	if (tbl->it_offset == 0)
		clear_bit(0, tbl->it_map);

	for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
		clear_bit(i - tbl->it_offset, tbl->it_map);
}
/*
 * Build an iommu_table structure. This contains a bitmap which
 * is used to manage allocation of the TCE space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
		unsigned long res_start, unsigned long res_end)
{
	unsigned long sz;
	static int welcomed = 0;
	unsigned int i;
	struct iommu_pool *p;

	BUG_ON(!tbl->it_ops);

	/* number of bytes needed for the bitmap */
	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

	tbl->it_map = vzalloc_node(sz, nid);
	if (!tbl->it_map) {
		pr_err("%s: Can't allocate %ld bytes\n", __func__, sz);
		return NULL;
	}

	iommu_table_reserve_pages(tbl, res_start, res_end);

	/* We only split the IOMMU table if we have 1GB or more of space */
	if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
		tbl->nr_pools = IOMMU_NR_POOLS;
	else
		tbl->nr_pools = 1;

	/* We reserve the top 1/4 of the table for large allocations */
	tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;

	for (i = 0; i < tbl->nr_pools; i++) {
		p = &tbl->pools[i];
		spin_lock_init(&(p->lock));
		p->start = tbl->poolsize * i;
		p->hint = p->start;
		p->end = p->start + tbl->poolsize;
	}

	p = &tbl->large_pool;
	spin_lock_init(&(p->lock));
	p->start = tbl->poolsize * i;
	p->hint = p->start;
	p->end = tbl->it_size;

	iommu_table_clear(tbl);

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	iommu_debugfs_add(tbl);

	return tbl;
}
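
/*
 * Illustrative setup sketch (comment only): a platform fills in the table
 * geometry and ops before handing the table to iommu_init_table(). The
 * names below are hypothetical placeholders, not a real platform:
 *
 *	tbl->it_ops = &my_platform_table_ops;		// hypothetical ops
 *	tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
 *	tbl->it_size = window_size >> tbl->it_page_shift;
 *	tbl->it_offset = window_start >> tbl->it_page_shift;
 *	if (!iommu_init_table(tbl, nid, 0, 0))
 *		// bitmap allocation failed, bail out
 */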
static void iommu_table_free(struct kref *kref)
{
	struct iommu_table *tbl;

	tbl = container_of(kref, struct iommu_table, it_kref);

	if (tbl->it_ops->free)
		tbl->it_ops->free(tbl);

	if (!tbl->it_map) {
		kfree(tbl);
		return;
	}

	iommu_debugfs_del(tbl);

	iommu_table_release_pages(tbl);

	/* verify that table contains no entries */
	if (!bitmap_empty(tbl->it_map, tbl->it_size))
		pr_warn("%s: Unexpected TCEs\n", __func__);

	/* free bitmap */
	vfree(tbl->it_map);

	/* free table */
	kfree(tbl);
}
struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl)
{
	if (kref_get_unless_zero(&tbl->it_kref))
		return tbl;

	return NULL;
}
EXPORT_SYMBOL_GPL(iommu_tce_table_get);

int iommu_tce_table_put(struct iommu_table *tbl)
{
	if (WARN_ON(!tbl))
		return 0;

	return kref_put(&tbl->it_kref, iommu_table_free);
}
EXPORT_SYMBOL_GPL(iommu_tce_table_put);
/* Creates TCEs for a user provided buffer. The user buffer must be
 * contiguous real kernel storage (not vmalloc). The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
			  struct page *page, unsigned long offset, size_t size,
			  unsigned long mask, enum dma_data_direction direction,
			  unsigned long attrs)
{
	dma_addr_t dma_handle = DMA_MAPPING_ERROR;
	void *vaddr;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	vaddr = page_address(page) + offset;
	uaddr = (unsigned long)vaddr;

	if (tbl) {
		npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
		align = 0;
		if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - tbl->it_page_shift;

		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
					 mask >> tbl->it_page_shift, align,
					 attrs);
		if (dma_handle == DMA_MAPPING_ERROR) {
			if (!(attrs & DMA_ATTR_NO_WARN) &&
			    printk_ratelimit()) {
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %p npages %d\n", tbl, vaddr,
					 npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
	}

	return dma_handle;
}
void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
		      size_t size, enum dma_data_direction direction,
		      unsigned long attrs)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size,
					 IOMMU_PAGE_SIZE(tbl));
		iommu_free(tbl, dma_handle, npages);
	}
}
/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
			   size_t size, dma_addr_t *dma_handle,
			   unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/*
	 * Client asked for way too much space. This is checked later
	 * anyway. It is easier to debug here for the drivers than in
	 * the TCE tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
			 size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = size >> tbl->it_page_shift;
	io_order = get_iommu_order(size, tbl);
	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> tbl->it_page_shift, io_order, 0);
	if (mapping == DMA_MAPPING_ERROR) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}
void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = size >> tbl->it_page_shift;
		iommu_free(tbl, dma_handle, nio_pages);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}
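
/*
 * Illustrative usage sketch (comment only): the coherent pair backs the
 * arch dma_map_ops for dma_alloc_coherent(). A minimal caller-side shape,
 * assuming "tbl" has been looked up from the device:
 *
 *	void *buf = iommu_alloc_coherent(dev, tbl, 8192, &handle,
 *					 dev->coherent_dma_mask,
 *					 GFP_KERNEL, dev_to_node(dev));
 *	...
 *	iommu_free_coherent(tbl, 8192, buf, handle);
 */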
unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return TCE_PCI_READ | TCE_PCI_WRITE;
	case DMA_FROM_DEVICE:
		return TCE_PCI_WRITE;
	case DMA_TO_DEVICE:
		return TCE_PCI_READ;
	default:
		return 0;
	}
}
EXPORT_SYMBOL_GPL(iommu_direction_to_tce_perm);
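
/*
 * Quick reference (comment only): the TCE permission bits are from the
 * device's point of view, so DMA_TO_DEVICE (device reads memory) needs
 * TCE_PCI_READ and DMA_FROM_DEVICE (device writes memory) needs
 * TCE_PCI_WRITE. iommu_tce_direction() below performs the inverse
 * mapping.
 */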
#ifdef CONFIG_IOMMU_API
/*
 * SPAPR TCE API
 */
static void group_release(void *iommu_data)
{
	struct iommu_table_group *table_group = iommu_data;

	table_group->group = NULL;
}

void iommu_register_group(struct iommu_table_group *table_group,
		int pci_domain_number, unsigned long pe_num)
{
	struct iommu_group *grp;
	char *name;

	grp = iommu_group_alloc();
	if (IS_ERR(grp)) {
		pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
				PTR_ERR(grp));
		return;
	}
	table_group->group = grp;
	iommu_group_set_iommudata(grp, table_group, group_release);
	name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
			pci_domain_number, pe_num);
	if (!name)
		return;
	iommu_group_set_name(grp, name);
	kfree(name);
}
enum dma_data_direction iommu_tce_direction(unsigned long tce)
{
	if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
		return DMA_BIDIRECTIONAL;
	else if (tce & TCE_PCI_READ)
		return DMA_TO_DEVICE;
	else if (tce & TCE_PCI_WRITE)
		return DMA_FROM_DEVICE;
	else
		return DMA_NONE;
}
EXPORT_SYMBOL_GPL(iommu_tce_direction);
void iommu_flush_tce(struct iommu_table *tbl)
{
	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();
}
EXPORT_SYMBOL_GPL(iommu_flush_tce);
int iommu_tce_check_ioba(unsigned long page_shift,
		unsigned long offset, unsigned long size,
		unsigned long ioba, unsigned long npages)
{
	unsigned long mask = (1UL << page_shift) - 1;

	if (ioba & mask)
		return -EINVAL;

	ioba >>= page_shift;
	if (ioba < offset)
		return -EINVAL;

	if ((ioba + 1) > (offset + size))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_check_ioba);

int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
{
	unsigned long mask = (1UL << page_shift) - 1;

	if (gpa & mask)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);
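
/*
 * Worked example (comment only): with page_shift = 12, a window starting
 * at offset = 0x100 and spanning size = 0x200 IOMMU pages accepts 4K
 * aligned ioba values in [0x100000, 0x2ff000]; ioba = 0x100800 fails the
 * alignment test, and ioba = 0x300000 falls past the window end.
 */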
long iommu_tce_xchg_no_kill(struct mm_struct *mm,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long *hpa,
		enum dma_data_direction *direction)
{
	long ret;
	unsigned long size = 0;

	ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, false);
	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
			(*direction == DMA_BIDIRECTIONAL)) &&
			!mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift,
					&size))
		SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_tce_xchg_no_kill);

void iommu_tce_kill(struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	if (tbl->it_ops->tce_kill)
		tbl->it_ops->tce_kill(tbl, entry, pages, false);
}
EXPORT_SYMBOL_GPL(iommu_tce_kill);
int iommu_take_ownership(struct iommu_table *tbl)
{
	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
	int ret = 0;

	/*
	 * VFIO does not control TCE entries allocation and the guest
	 * can write new TCEs on top of existing ones so iommu_tce_build()
	 * must be able to release old pages. This functionality
	 * requires exchange() callback defined so if it is not
	 * implemented, we disallow taking ownership over the table.
	 */
	if (!tbl->it_ops->xchg_no_kill)
		return -EINVAL;

	spin_lock_irqsave(&tbl->large_pool.lock, flags);
	for (i = 0; i < tbl->nr_pools; i++)
		spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);

	iommu_table_release_pages(tbl);

	if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
		pr_err("iommu_tce: it_map is not empty");
		ret = -EBUSY;
		/* Undo iommu_table_release_pages, i.e. restore bit#0, etc */
		iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
				tbl->it_reserved_end);
	} else {
		memset(tbl->it_map, 0xff, sz);
	}

	for (i = 0; i < tbl->nr_pools; i++)
		spin_unlock(&tbl->pools[i].lock);
	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_take_ownership);
void iommu_release_ownership(struct iommu_table *tbl)
{
	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;

	spin_lock_irqsave(&tbl->large_pool.lock, flags);
	for (i = 0; i < tbl->nr_pools; i++)
		spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);

	memset(tbl->it_map, 0, sz);

	iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
			tbl->it_reserved_end);

	for (i = 0; i < tbl->nr_pools; i++)
		spin_unlock(&tbl->pools[i].lock);
	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
}
EXPORT_SYMBOL_GPL(iommu_release_ownership);
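
/*
 * Illustrative lifecycle sketch (comment only): VFIO takes exclusive
 * control of a table before exposing it to userspace and returns it to
 * the kernel DMA API afterwards:
 *
 *	if (!iommu_take_ownership(tbl))		// marks the whole it_map used
 *		...hand the table to the VFIO/SPAPR TCE backend...
 *	iommu_release_ownership(tbl);		// clears it_map, re-reserves pages
 */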
int iommu_add_device(struct iommu_table_group *table_group, struct device *dev)
{
	/*
	 * The sysfs entries should be populated before
	 * binding the IOMMU group. If the sysfs entries aren't
	 * ready, we simply bail.
	 */
	if (!device_is_registered(dev))
		return -ENOENT;

	if (device_iommu_mapped(dev)) {
		pr_debug("%s: Skipping device %s with iommu group %d\n",
			 __func__, dev_name(dev),
			 iommu_group_id(dev->iommu_group));
		return -EBUSY;
	}

	pr_debug("%s: Adding %s to iommu group %d\n",
		 __func__, dev_name(dev), iommu_group_id(table_group->group));

	return iommu_group_add_device(table_group->group, dev);
}
EXPORT_SYMBOL_GPL(iommu_add_device);

void iommu_del_device(struct device *dev)
{
	/*
	 * Some devices might not have an IOMMU table and group
	 * and we needn't detach them from the associated
	 * IOMMU groups.
	 */
	if (!device_iommu_mapped(dev)) {
		pr_debug("iommu_tce: skipping device %s with no tbl\n",
			 dev_name(dev));
		return;
	}

	iommu_group_remove_device(dev);
}
EXPORT_SYMBOL_GPL(iommu_del_device);
#endif /* CONFIG_IOMMU_API */