// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)    "iommu: " fmt

#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/kernel.h>
#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/property.h>
#include <linux/fsl/mc.h>
#include <linux/module.h>
#include <linux/cc_platform.h>
#include <trace/events/iommu.h>

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);

static unsigned int iommu_def_domain_type __read_mostly;
static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT);
static u32 iommu_cmd_line __read_mostly;

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct mutex mutex;
	struct blocking_notifier_head notifier;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
	struct iommu_domain *default_domain;
	struct iommu_domain *domain;
	struct list_head entry;
};

struct group_device {
	struct list_head list;
	struct device *dev;
	char *name;
};

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

static const char * const iommu_group_resv_type_string[] = {
	[IOMMU_RESV_DIRECT]			= "direct",
	[IOMMU_RESV_DIRECT_RELAXABLE]		= "direct-relaxable",
	[IOMMU_RESV_RESERVED]			= "reserved",
	[IOMMU_RESV_MSI]			= "msi",
	[IOMMU_RESV_SW_MSI]			= "msi",
};

#define IOMMU_CMD_LINE_DMA_API		BIT(0)
#define IOMMU_CMD_LINE_STRICT		BIT(1)

static int iommu_alloc_default_domain(struct iommu_group *group,
				      struct device *dev);
static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group);
static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group);
static int iommu_create_device_direct_mappings(struct iommu_group *group,
					       struct device *dev);
static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
static ssize_t iommu_group_store_type(struct iommu_group *group,
				      const char *buf, size_t count);

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =		\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)

static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);

/*
 * Use a function instead of an array here because the domain-type is a
 * bit-field, so an array would waste memory.
 */
static const char *iommu_domain_type_str(unsigned int t)
{
	switch (t) {
	case IOMMU_DOMAIN_BLOCKED:
		return "Blocked";
	case IOMMU_DOMAIN_IDENTITY:
		return "Passthrough";
	case IOMMU_DOMAIN_UNMANAGED:
		return "Unmanaged";
	case IOMMU_DOMAIN_DMA:
	case IOMMU_DOMAIN_DMA_FQ:
		return "Translated";
	default:
		return "Unknown";
	}
}

static int __init iommu_subsys_init(void)
{
	if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) {
		if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
			iommu_set_default_passthrough(false);
		else
			iommu_set_default_translated(false);

		if (iommu_default_passthrough() && cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
			pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
			iommu_set_default_translated(false);
		}
	}

	if (!iommu_default_passthrough() && !iommu_dma_strict)
		iommu_def_domain_type = IOMMU_DOMAIN_DMA_FQ;

	pr_info("Default domain type: %s %s\n",
		iommu_domain_type_str(iommu_def_domain_type),
		(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API) ?
			"(set via kernel command line)" : "");

	if (!iommu_default_passthrough())
		pr_info("DMA domain TLB invalidation policy: %s mode %s\n",
			iommu_dma_strict ? "strict" : "lazy",
			(iommu_cmd_line & IOMMU_CMD_LINE_STRICT) ?
				"(set via kernel command line)" : "");

	return 0;
}
subsys_initcall(iommu_subsys_init);

/**
 * iommu_device_register() - Register an IOMMU hardware instance
 * @iommu: IOMMU handle for the instance
 * @ops: IOMMU ops to associate with the instance
 * @hwdev: (optional) actual instance device, used for fwnode lookup
 *
 * Return: 0 on success, or an error.
 */
int iommu_device_register(struct iommu_device *iommu,
			  const struct iommu_ops *ops, struct device *hwdev)
{
	/* We need to be able to take module references appropriately */
	if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner))
		return -EINVAL;

	iommu->ops = ops;
	if (hwdev)
		iommu->fwnode = hwdev->fwnode;

	spin_lock(&iommu_device_lock);
	list_add_tail(&iommu->list, &iommu_device_list);
	spin_unlock(&iommu_device_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_device_register);

void iommu_device_unregister(struct iommu_device *iommu)
{
	spin_lock(&iommu_device_lock);
	list_del(&iommu->list);
	spin_unlock(&iommu_device_lock);
}
EXPORT_SYMBOL_GPL(iommu_device_unregister);

static struct dev_iommu *dev_iommu_get(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;

	if (param)
		return param;

	param = kzalloc(sizeof(*param), GFP_KERNEL);
	if (!param)
		return NULL;

	mutex_init(&param->lock);
	dev->iommu = param;
	return param;
}

static void dev_iommu_free(struct device *dev)
{
	iommu_fwspec_free(dev);
	kfree(dev->iommu);
	dev->iommu = NULL;
}

static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_device *iommu_dev;
	struct iommu_group *group;
	int ret;

	if (!ops)
		return -ENODEV;

	if (!dev_iommu_get(dev))
		return -ENOMEM;

	if (!try_module_get(ops->owner)) {
		ret = -EINVAL;
		goto err_free;
	}

	iommu_dev = ops->probe_device(dev);
	if (IS_ERR(iommu_dev)) {
		ret = PTR_ERR(iommu_dev);
		goto out_module_put;
	}

	dev->iommu->iommu_dev = iommu_dev;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_release;
	}
	iommu_group_put(group);

	if (group_list && !group->default_domain && list_empty(&group->entry))
		list_add_tail(&group->entry, group_list);

	iommu_device_link(iommu_dev, dev);

	return 0;

out_release:
	ops->release_device(dev);

out_module_put:
	module_put(ops->owner);

err_free:
	dev_iommu_free(dev);

	return ret;
}

int iommu_probe_device(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	int ret;

	ret = __iommu_probe_device(dev, NULL);
	if (ret)
		goto err_out;

	group = iommu_group_get(dev);
	if (!group) {
		ret = -ENODEV;
		goto err_release;
	}

	/*
	 * Try to allocate a default domain - needs support from the
	 * IOMMU driver. There are still some drivers which don't
	 * support default domains, so the return value is not yet
	 * checked.
	 */
	mutex_lock(&group->mutex);
	iommu_alloc_default_domain(group, dev);
	mutex_unlock(&group->mutex);

	if (group->default_domain) {
		ret = __iommu_attach_device(group->default_domain, dev);
		if (ret) {
			iommu_group_put(group);
			goto err_release;
		}
	}

	iommu_create_device_direct_mappings(group, dev);

	iommu_group_put(group);

	if (ops->probe_finalize)
		ops->probe_finalize(dev);

	return 0;

err_release:
	iommu_release_device(dev);

err_out:
	return ret;
}

void iommu_release_device(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (!dev->iommu)
		return;

	iommu_device_unlink(dev->iommu->iommu_dev, dev);

	ops->release_device(dev);

	iommu_group_remove_device(dev);
	module_put(ops->owner);
	dev_iommu_free(dev);
}

static int __init iommu_set_def_domain_type(char *str)
{
	bool pt;
	int ret;

	ret = kstrtobool(str, &pt);
	if (ret)
		return ret;

	if (pt)
		iommu_set_default_passthrough(true);
	else
		iommu_set_default_translated(true);

	return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);

static int __init iommu_dma_setup(char *str)
{
	int ret = kstrtobool(str, &iommu_dma_strict);

	if (!ret)
		iommu_cmd_line |= IOMMU_CMD_LINE_STRICT;
	return ret;
}
early_param("iommu.strict", iommu_dma_setup);

void iommu_set_dma_strict(void)
{
	iommu_dma_strict = true;
	if (iommu_def_domain_type == IOMMU_DOMAIN_DMA_FQ)
		iommu_def_domain_type = IOMMU_DOMAIN_DMA;
}

static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sprintf(buf, "%s\n", group->name);
}

/**
 * iommu_insert_resv_region - Insert a new region in the
 * list of reserved regions.
 * @new: new region to insert
 * @regions: list of regions
 *
 * Elements are sorted by start address and overlapping segments
 * of the same type are merged.
 */
static int iommu_insert_resv_region(struct iommu_resv_region *new,
				    struct list_head *regions)
{
	struct iommu_resv_region *iter, *tmp, *nr, *top;
	LIST_HEAD(stack);

	nr = iommu_alloc_resv_region(new->start, new->length,
				     new->prot, new->type);
	if (!nr)
		return -ENOMEM;

	/* First add the new element based on start address sorting */
	list_for_each_entry(iter, regions, list) {
		if (nr->start < iter->start ||
		    (nr->start == iter->start && nr->type <= iter->type))
			break;
	}
	list_add_tail(&nr->list, &iter->list);

	/* Merge overlapping segments of type nr->type in @regions, if any */
	list_for_each_entry_safe(iter, tmp, regions, list) {
		phys_addr_t top_end, iter_end = iter->start + iter->length - 1;

		/* no merge needed on elements of different types than @new */
		if (iter->type != new->type) {
			list_move_tail(&iter->list, &stack);
			continue;
		}

		/* look for the last stack element of same type as @iter */
		list_for_each_entry_reverse(top, &stack, list)
			if (top->type == iter->type)
				goto check_overlap;

		list_move_tail(&iter->list, &stack);
		continue;

check_overlap:
		top_end = top->start + top->length - 1;

		if (iter->start > top_end + 1) {
			list_move_tail(&iter->list, &stack);
		} else {
			top->length = max(top_end, iter_end) - top->start + 1;
			list_del(&iter->list);
			kfree(iter);
		}
	}
	list_splice(&stack, regions);
	return 0;
}

static int
iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
				 struct list_head *group_resv_regions)
{
	struct iommu_resv_region *entry;
	int ret = 0;

	list_for_each_entry(entry, dev_resv_regions, list) {
		ret = iommu_insert_resv_region(entry, group_resv_regions);
		if (ret)
			break;
	}
	return ret;
}

int iommu_get_group_resv_regions(struct iommu_group *group,
				 struct list_head *head)
{
	struct group_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	list_for_each_entry(device, &group->devices, list) {
		struct list_head dev_resv_regions;

		INIT_LIST_HEAD(&dev_resv_regions);
		iommu_get_resv_regions(device->dev, &dev_resv_regions);
		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
		iommu_put_resv_regions(device->dev, &dev_resv_regions);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);

static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
					     char *buf)
{
	struct iommu_resv_region *region, *next;
	struct list_head group_resv_regions;
	char *str = buf;

	INIT_LIST_HEAD(&group_resv_regions);
	iommu_get_group_resv_regions(group, &group_resv_regions);

	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
		str += sprintf(str, "0x%016llx 0x%016llx %s\n",
			       (long long int)region->start,
			       (long long int)(region->start +
						region->length - 1),
			       iommu_group_resv_type_string[region->type]);
		kfree(region);
	}

	return (str - buf);
}

static ssize_t iommu_group_show_type(struct iommu_group *group,
				     char *buf)
{
	char *type = "unknown\n";

	mutex_lock(&group->mutex);
	if (group->default_domain) {
		switch (group->default_domain->type) {
		case IOMMU_DOMAIN_BLOCKED:
			type = "blocked\n";
			break;
		case IOMMU_DOMAIN_IDENTITY:
			type = "identity\n";
			break;
		case IOMMU_DOMAIN_UNMANAGED:
			type = "unmanaged\n";
			break;
		case IOMMU_DOMAIN_DMA:
			type = "DMA\n";
			break;
		case IOMMU_DOMAIN_DMA_FQ:
			type = "DMA-FQ\n";
			break;
		}
	}
	mutex_unlock(&group->mutex);
	strcpy(buf, type);

	return strlen(type);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static IOMMU_GROUP_ATTR(reserved_regions, 0444,
			iommu_group_show_resv_regions, NULL);

static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type,
			iommu_group_store_type);

static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	pr_debug("Releasing group %d\n", group->id);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	ida_simple_remove(&iommu_group_ida, group->id);

	if (group->default_domain)
		iommu_domain_free(group->default_domain);

	kfree(group->name);
	kfree(group);
}

static struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group.  The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added.  Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	INIT_LIST_HEAD(&group->entry);
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(group);
		return ERR_PTR(ret);
	}
	group->id = ret;

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		ida_simple_remove(&iommu_group_ida, group->id);
		kobject_put(&group->kobj);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group.  We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	ret = iommu_group_create_file(group,
				      &iommu_group_attr_reserved_regions);
	if (ret)
		return ERR_PTR(ret);

	ret = iommu_group_create_file(group, &iommu_group_attr_type);
	if (ret)
		return ERR_PTR(ret);

	pr_debug("Allocated group %d\n", group->id);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);

struct iommu_group *iommu_group_get_by_id(int id)
{
	struct kobject *group_kobj;
	struct iommu_group *group;
	const char *name;

	if (!iommu_group_kset)
		return NULL;

	name = kasprintf(GFP_KERNEL, "%d", id);
	if (!name)
		return NULL;

	group_kobj = kset_find_obj(iommu_group_kset, name);
	kfree(name);

	if (!group_kobj)
		return NULL;

	group = container_of(group_kobj, struct iommu_group, kobj);
	BUG_ON(group->id != id);

	kobject_get(group->devices_kobj);
	kobject_put(&group->kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to retrieve it.  Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to set the data after
 * the group has been allocated.  Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);

/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group.  When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

static int iommu_create_device_direct_mappings(struct iommu_group *group,
					       struct device *dev)
{
	struct iommu_domain *domain = group->default_domain;
	struct iommu_resv_region *entry;
	struct list_head mappings;
	unsigned long pg_size;
	int ret = 0;

	if (!domain || !iommu_is_dma_domain(domain))
		return 0;

	BUG_ON(!domain->pgsize_bitmap);

	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
	INIT_LIST_HEAD(&mappings);

	iommu_get_resv_regions(dev, &mappings);

	/* We need to consider overlapping regions for different devices */
	list_for_each_entry(entry, &mappings, list) {
		dma_addr_t start, end, addr;
		size_t map_size = 0;

		if (domain->ops->apply_resv_region)
			domain->ops->apply_resv_region(dev, domain, entry);

		start = ALIGN(entry->start, pg_size);
		end   = ALIGN(entry->start + entry->length, pg_size);

		if (entry->type != IOMMU_RESV_DIRECT &&
		    entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
			continue;

		for (addr = start; addr <= end; addr += pg_size) {
			phys_addr_t phys_addr;

			if (addr == end)
				goto map_end;

			phys_addr = iommu_iova_to_phys(domain, addr);
			if (!phys_addr) {
				map_size += pg_size;
				continue;
			}

map_end:
			if (map_size) {
				ret = iommu_map(domain, addr - map_size,
						addr - map_size, map_size,
						entry->prot);
				if (ret)
					goto out;
				map_size = 0;
			}
		}
	}

	iommu_flush_iotlb_all(domain);

out:
	iommu_put_resv_regions(dev, &mappings);

	return ret;
}

static bool iommu_is_attach_deferred(struct iommu_domain *domain,
				     struct device *dev)
{
	if (domain->ops->is_attach_deferred)
		return domain->ops->is_attach_deferred(domain, dev);

	return false;
}

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group.  Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	int ret, i = 0;
	struct group_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret)
		goto err_free_device;

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		ret = -ENOMEM;
		goto err_remove_link;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and append an instance to the name.
			 */
			kfree(device->name);
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}
		goto err_free_name;
	}

	kobject_get(group->devices_kobj);

	dev->iommu_group = group;

	mutex_lock(&group->mutex);
	list_add_tail(&device->list, &group->devices);
	if (group->domain  && !iommu_is_attach_deferred(group->domain, dev))
		ret = __iommu_attach_device(group->domain, dev);
	mutex_unlock(&group->mutex);
	if (ret)
		goto err_put_group;

	/* Notify any listeners about change to group. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);

	trace_add_device_to_group(group->id, dev);

	dev_info(dev, "Adding to iommu group %d\n", group->id);

	return 0;

err_put_group:
	mutex_lock(&group->mutex);
	list_del(&device->list);
	mutex_unlock(&group->mutex);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
	sysfs_remove_link(group->devices_kobj, device->name);
err_free_name:
	kfree(device->name);
err_remove_link:
	sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
	kfree(device);
	dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group.  This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct group_device *tmp_device, *device = NULL;

	if (!group)
		return;

	dev_info(dev, "Removing from iommu group %d\n", group->id);

	/* Pre-notify listeners that a device is being removed. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);

	mutex_lock(&group->mutex);
	list_for_each_entry(tmp_device, &group->devices, list) {
		if (tmp_device->dev == dev) {
			device = tmp_device;
			list_del(&device->list);
			break;
		}
	}
	mutex_unlock(&group->mutex);

	if (!device)
		return;

	sysfs_remove_link(group->devices_kobj, device->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	kfree(device->name);
	kfree(device);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

static int iommu_group_device_count(struct iommu_group *group)
{
	struct group_device *entry;
	int ret = 0;

	list_for_each_entry(entry, &group->devices, list)
		ret++;

	return ret;
}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
				      int (*fn)(struct device *, void *))
{
	struct group_device *device;
	int ret = 0;

	list_for_each_entry(device, &group->devices, list) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	return ret;
}

int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_group_for_each_dev(group, data, fn);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device.  If found, the group is returned and the group
 * reference is incremented, else NULL.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group.  Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
	kobject_get(group->devices_kobj);
	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_ref_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group.  Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_group_register_notifier - Register a notifier for group changes
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * This function allows iommu group users to track changes in a group.
 * See include/linux/iommu.h for actions sent via this notifier.  Caller
 * should hold a reference to the group throughout notifier registration.
 */
int iommu_group_register_notifier(struct iommu_group *group,
				  struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);

/**
 * iommu_group_unregister_notifier - Unregister a notifier
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * Unregister a previously registered group notifier block.
 */
int iommu_group_unregister_notifier(struct iommu_group *group,
				    struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);

/**
 * iommu_register_device_fault_handler() - Register a device fault handler
 * @dev: the device
 * @handler: the fault handler
 * @data: private data passed as argument to the handler
 *
 * When an IOMMU fault event is received, this handler gets called with the
 * fault event and data as argument. The handler should return 0 on success. If
 * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
 * complete the fault by calling iommu_page_response() with one of the following
 * response code:
 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
 * - IOMMU_PAGE_RESP_INVALID: terminate the fault
 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
 *   page faults if possible.
 *
 * Return 0 if the fault handler was installed successfully, or an error.
 */
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);
	/* Only allow one fault handler registered for each device */
	if (param->fault_param) {
		ret = -EBUSY;
		goto done_unlock;
	}

	get_device(dev);
	param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
	if (!param->fault_param) {
		put_device(dev);
		ret = -ENOMEM;
		goto done_unlock;
	}
	param->fault_param->handler = handler;
	param->fault_param->data = data;
	mutex_init(&param->fault_param->lock);
	INIT_LIST_HEAD(&param->fault_param->faults);

done_unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);

/**
 * iommu_unregister_device_fault_handler() - Unregister the device fault handler
 * @dev: the device
 *
 * Remove the device fault handler installed with
 * iommu_register_device_fault_handler().
 *
 * Return 0 on success, or an error.
 */
int iommu_unregister_device_fault_handler(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);

	if (!param->fault_param)
		goto unlock;

	/* we cannot unregister handler if there are pending faults */
	if (!list_empty(&param->fault_param->faults)) {
		ret = -EBUSY;
		goto unlock;
	}

	kfree(param->fault_param);
	param->fault_param = NULL;
	put_device(dev);
unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);

/**
 * iommu_report_device_fault() - Report fault event to device driver
 * @dev: the device
 * @evt: fault event data
 *
 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
 * handler. When this function fails and the fault is recoverable, it is the
 * caller's responsibility to complete the fault.
 *
 * Return 0 on success, or an error.
 */
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	struct dev_iommu *param = dev->iommu;
	struct iommu_fault_event *evt_pending = NULL;
	struct iommu_fault_param *fparam;
	int ret = 0;

	if (!param || !evt)
		return -EINVAL;

	/* we only report device fault if there is a handler registered */
	mutex_lock(&param->lock);
	fparam = param->fault_param;
	if (!fparam || !fparam->handler) {
		ret = -EINVAL;
		goto done_unlock;
	}

	if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
	    (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
		evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
				      GFP_KERNEL);
		if (!evt_pending) {
			ret = -ENOMEM;
			goto done_unlock;
		}
		mutex_lock(&fparam->lock);
		list_add_tail(&evt_pending->list, &fparam->faults);
		mutex_unlock(&fparam->lock);
	}

	ret = fparam->handler(&evt->fault, fparam->data);
	if (ret && evt_pending) {
		mutex_lock(&fparam->lock);
		list_del(&evt_pending->list);
		mutex_unlock(&fparam->lock);
		kfree(evt_pending);
	}
done_unlock:
	mutex_unlock(&param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_report_device_fault);

int iommu_page_response(struct device *dev,
			struct iommu_page_response *msg)
{
	bool needs_pasid;
	int ret = -EINVAL;
	struct iommu_fault_event *evt;
	struct iommu_fault_page_request *prm;
	struct dev_iommu *param = dev->iommu;
	bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain || !domain->ops->page_response)
		return -ENODEV;

	if (!param || !param->fault_param)
		return -EINVAL;

	if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
	    msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
		return -EINVAL;

	/* Only send response if there is a fault report pending */
	mutex_lock(&param->fault_param->lock);
	if (list_empty(&param->fault_param->faults)) {
		dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
		goto done_unlock;
	}
	/*
	 * Check if we have a matching page request pending to respond,
	 * otherwise return -EINVAL
	 */
	list_for_each_entry(evt, &param->fault_param->faults, list) {
		prm = &evt->fault.prm;
		if (prm->grpid != msg->grpid)
			continue;

		/*
		 * If the PASID is required, the corresponding request is
		 * matched using the group ID, the PASID valid bit and the PASID
		 * value. Otherwise only the group ID matches request and
		 * response.
		 */
		needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
		if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
			continue;

		if (!needs_pasid && has_pasid) {
			/* No big deal, just clear it. */
			msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
			msg->pasid = 0;
		}

		ret = domain->ops->page_response(dev, evt, msg);
		list_del(&evt->list);
		kfree(evt);
		break;
	}

done_unlock:
	mutex_unlock(&param->fault_param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_page_response);

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding.  This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as it passes through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups.  For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
							unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return NULL;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = get_pci_alias_group(tmp, devfns);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups.  DMA
 * aliases are only supported on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at PCIe
 * devices, and therefore only expect multiple slots on the root complex or
 * downstream switch ports).  It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop.  To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
		return NULL;

	group = iommu_group_get(&pdev->dev);
	if (group)
		return group;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus)
			continue;

		/* We alias them or they alias us */
		if (pci_devs_are_dma_aliases(pdev, tmp)) {
			group = get_pci_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}

			group = get_pci_function_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}
		}
	}

	return NULL;
}

struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device.  Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(generic_device_group);

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	u64 devfns[4] = { 0 };

	if (WARN_ON(!dev_is_pci(dev)))
		return ERR_PTR(-EINVAL);

	/*
	 * Find the upstream DMA alias for the device.  A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Look for existing groups on device aliases.  If we alias another
	 * device or another device aliases us, use the same group.
	 */
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any.  No need to clear
	 * the search bitmap, the tested devfns are still valid.
	 */
	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/* No shared group found, allocate new */
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(pci_device_group);

/* Get the IOMMU group for device on fsl-mc bus */
struct iommu_group *fsl_mc_device_group(struct device *dev)
{
	struct device *cont_dev = fsl_mc_cont_dev(dev);
	struct iommu_group *group;

	group = iommu_group_get(cont_dev);
	if (!group)
		group = iommu_group_alloc();
	return group;
}
EXPORT_SYMBOL_GPL(fsl_mc_device_group);

static int iommu_get_def_domain_type(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted)
		return IOMMU_DOMAIN_DMA;

	if (ops->def_domain_type)
		return ops->def_domain_type(dev);

	return 0;
}

static int iommu_group_alloc_default_domain(struct bus_type *bus,
					    struct iommu_group *group,
					    unsigned int type)
{
	struct iommu_domain *dom;

	dom = __iommu_domain_alloc(bus, type);
	if (!dom && type != IOMMU_DOMAIN_DMA) {
		dom = __iommu_domain_alloc(bus, IOMMU_DOMAIN_DMA);
		if (dom)
			pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA",
				type, group->name);
	}

	if (!dom)
		return -ENOMEM;

	group->default_domain = dom;
	if (!group->domain)
		group->domain = dom;
	return 0;
}

static int iommu_alloc_default_domain(struct iommu_group *group,
				      struct device *dev)
{
	unsigned int type;

	if (group->default_domain)
		return 0;

	type = iommu_get_def_domain_type(dev) ? : iommu_def_domain_type;

	return iommu_group_alloc_default_domain(dev->bus, group, type);
}

/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device.  On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device.  The reference should be released with iommu_group_put().
 */
static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (group)
		return group;

	if (!ops)
		return ERR_PTR(-EINVAL);

	group = ops->device_group(dev);
	if (WARN_ON_ONCE(group == NULL))
		return ERR_PTR(-EINVAL);

	if (IS_ERR(group))
		return group;

	ret = iommu_group_add_device(group, dev);
	if (ret)
		goto out_put_group;

	return group;

out_put_group:
	iommu_group_put(group);

	return ERR_PTR(ret);
}

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
	return group->default_domain;
}

static int probe_iommu_group(struct device *dev, void *data)
{
	struct list_head *group_list = data;
	struct iommu_group *group;
	int ret;

	/* Device is probed already if in a group */
	group = iommu_group_get(dev);
	if (group) {
		iommu_group_put(group);
		return 0;
	}

	ret = __iommu_probe_device(dev, group_list);
	if (ret == -ENODEV)
		ret = 0;

	return ret;
}

static int remove_iommu_group(struct device *dev, void *data)
{
	iommu_release_device(dev);

	return 0;
}

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	unsigned long group_action = 0;
	struct device *dev = data;
	struct iommu_group *group;

	/*
	 * ADD/DEL call into iommu driver ops if provided, which may
	 * result in ADD/DEL notifiers to group->notifier
	 */
	if (action == BUS_NOTIFY_ADD_DEVICE) {
		int ret;

		ret = iommu_probe_device(dev);
		return (ret) ? NOTIFY_DONE : NOTIFY_OK;
	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
		iommu_release_device(dev);
		return NOTIFY_OK;
	}

	/*
	 * Remaining BUS_NOTIFYs get filtered and republished to the
	 * group, if anyone is listening
	 */
	group = iommu_group_get(dev);
	if (!group)
		return 0;

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
		break;
	case BUS_NOTIFY_BOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
		break;
	case BUS_NOTIFY_UNBIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
		break;
	case BUS_NOTIFY_UNBOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
		break;
	}

	if (group_action)
		blocking_notifier_call_chain(&group->notifier,
					     group_action, dev);

	iommu_group_put(group);
	return 0;
}

struct __group_domain_type {
	struct device *dev;
	unsigned int type;
};

static int probe_get_default_domain_type(struct device *dev, void *data)
{
	struct __group_domain_type *gtype = data;
	unsigned int type = iommu_get_def_domain_type(dev);

	if (type) {
		if (gtype->type && gtype->type != type) {
			dev_warn(dev, "Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n",
				 iommu_domain_type_str(type),
				 dev_name(gtype->dev),
				 iommu_domain_type_str(gtype->type));
			gtype->type = 0;
		}

		if (!gtype->dev) {
			gtype->dev  = dev;
			gtype->type = type;
		}
	}

	return 0;
}

static void probe_alloc_default_domain(struct bus_type *bus,
				       struct iommu_group *group)
{
	struct __group_domain_type gtype;

	memset(&gtype, 0, sizeof(gtype));

	/* Ask for default domain requirements of all devices in the group */
	__iommu_group_for_each_dev(group, &gtype,
				   probe_get_default_domain_type);

	if (!gtype.type)
		gtype.type = iommu_def_domain_type;

	iommu_group_alloc_default_domain(bus, group, gtype.type);
}

static int iommu_group_do_dma_attach(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;
	int ret = 0;

	if (!iommu_is_attach_deferred(domain, dev))
		ret = __iommu_attach_device(domain, dev);

	return ret;
}

static int __iommu_group_dma_attach(struct iommu_group *group)
{
	return __iommu_group_for_each_dev(group, group->default_domain,
					  iommu_group_do_dma_attach);
}

static int iommu_group_do_probe_finalize(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	if (domain->ops->probe_finalize)
		domain->ops->probe_finalize(dev);

	return 0;
}

static void __iommu_group_dma_finalize(struct iommu_group *group)
{
	__iommu_group_for_each_dev(group, group->default_domain,
				   iommu_group_do_probe_finalize);
}

static int iommu_do_create_direct_mappings(struct device *dev, void *data)
{
	struct iommu_group *group = data;

	iommu_create_device_direct_mappings(group, dev);

	return 0;
}

static int iommu_group_create_direct_mappings(struct iommu_group *group)
{
	return __iommu_group_for_each_dev(group, group,
					  iommu_do_create_direct_mappings);
}

int bus_iommu_probe(struct bus_type *bus)
{
	struct iommu_group *group, *next;
	LIST_HEAD(group_list);
	int ret;

	/*
	 * This code-path does not allocate the default domain when
	 * creating the iommu group, so do it after the groups are
	 * created.
	 */
	ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
	if (ret)
		return ret;

	list_for_each_entry_safe(group, next, &group_list, entry) {
		/* Remove item from the list */
		list_del_init(&group->entry);

		mutex_lock(&group->mutex);

		/* Try to allocate default domain */
		probe_alloc_default_domain(bus, group);

		if (!group->default_domain) {
			mutex_unlock(&group->mutex);
			continue;
		}

		iommu_group_create_direct_mappings(group);

		ret = __iommu_group_dma_attach(group);

		mutex_unlock(&group->mutex);

		if (ret)
			break;

		__iommu_group_dma_finalize(group);
	}

	return ret;
}

static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
{
	struct notifier_block *nb;
	int err;

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	nb->notifier_call = iommu_bus_notifier;

	err = bus_register_notifier(bus, nb);
	if (err)
		goto out_free;

	err = bus_iommu_probe(bus);
	if (err)
		goto out_err;

	return 0;

out_err:
	/* Clean up */
	bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
	bus_unregister_notifier(bus, nb);

out_free:
	kfree(nb);

	return err;
}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus.
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus. Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up. With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
{
	int err;

	if (ops == NULL) {
		bus->iommu_ops = NULL;
		return 0;
	}

	if (bus->iommu_ops != NULL)
		return -EBUSY;

	bus->iommu_ops = ops;

	/* Do IOMMU specific setup for this bus-type */
	err = iommu_bus_init(bus, ops);
	if (err)
		bus->iommu_ops = NULL;

	return err;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);

bool iommu_present(struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
	if (!bus->iommu_ops || !bus->iommu_ops->capable)
		return false;

	return bus->iommu_ops->capable(cap);
}
EXPORT_SYMBOL_GPL(iommu_capable);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
					iommu_fault_handler_t handler,
					void *token)
{
	BUG_ON(!domain);

	domain->handler = handler;
	domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type)
{
	struct iommu_domain *domain;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;

	domain = bus->iommu_ops->domain_alloc(type);
	if (!domain)
		return NULL;

	domain->ops  = bus->iommu_ops;
	domain->type = type;
	/* Assume all sizes by default; the driver may override this later */
	domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;

	/* Temporarily avoid -EEXIST while drivers still get their own cookies */
	if (iommu_is_dma_domain(domain) && !domain->iova_cookie && iommu_get_dma_cookie(domain)) {
		iommu_domain_free(domain);
		domain = NULL;
	}
	return domain;
}

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
	iommu_put_dma_cookie(domain);
	domain->ops->domain_free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev)
{
	int ret;

	if (unlikely(domain->ops->attach_dev == NULL))
		return -ENODEV;

	ret = domain->ops->attach_dev(domain, dev);
	if (!ret)
		trace_attach_device_to_domain(dev);
	return ret;
}

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (!group)
		return -ENODEV;

	/*
	 * Lock the group to make sure the device-count doesn't
	 * change while we are attaching
	 */
	mutex_lock(&group->mutex);
	ret = -EINVAL;
	if (iommu_group_device_count(group) != 1)
		goto out_unlock;

	ret = __iommu_attach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
{
	const struct iommu_ops *ops = domain->ops;

	if (ops->is_attach_deferred && ops->is_attach_deferred(domain, dev))
		return __iommu_attach_device(domain, dev);

	return 0;
}

/*
 * Check flags and other user provided data for valid combinations. We also
 * make sure no reserved fields or unused flags are set. This is to ensure
 * not breaking userspace in the future when these fields or flags are used.
 */
static int iommu_check_cache_invl_data(struct iommu_cache_invalidate_info *info)
{
	u32 mask;
	int i;

	if (info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1)
		return -EINVAL;

	mask = (1 << IOMMU_CACHE_INV_TYPE_NR) - 1;
	if (info->cache & ~mask)
		return -EINVAL;

	if (info->granularity >= IOMMU_INV_GRANU_NR)
		return -EINVAL;

	switch (info->granularity) {
	case IOMMU_INV_GRANU_ADDR:
		if (info->cache & IOMMU_CACHE_INV_TYPE_PASID)
			return -EINVAL;

		mask = IOMMU_INV_ADDR_FLAGS_PASID |
			IOMMU_INV_ADDR_FLAGS_ARCHID |
			IOMMU_INV_ADDR_FLAGS_LEAF;

		if (info->granu.addr_info.flags & ~mask)
			return -EINVAL;
		break;
	case IOMMU_INV_GRANU_PASID:
		mask = IOMMU_INV_PASID_FLAGS_PASID |
			IOMMU_INV_PASID_FLAGS_ARCHID;
		if (info->granu.pasid_info.flags & ~mask)
			return -EINVAL;

		break;
	case IOMMU_INV_GRANU_DOMAIN:
		if (info->cache & IOMMU_CACHE_INV_TYPE_DEV_IOTLB)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* Check reserved padding fields */
	for (i = 0; i < sizeof(info->padding); i++) {
		if (info->padding[i])
			return -EINVAL;
	}

	return 0;
}

int iommu_uapi_cache_invalidate(struct iommu_domain *domain, struct device *dev,
				void __user *uinfo)
{
	struct iommu_cache_invalidate_info inv_info = { 0 };
	u32 minsz;
	int ret;

	if (unlikely(!domain->ops->cache_invalidate))
		return -ENODEV;

	/*
	 * No new spaces can be added before the variable sized union, the
	 * minimum size is the offset to the union.
	 */
	minsz = offsetof(struct iommu_cache_invalidate_info, granu);

	/* Copy minsz from user to get flags and argsz */
	if (copy_from_user(&inv_info, uinfo, minsz))
		return -EFAULT;

	/* Fields before the variable size union are mandatory */
	if (inv_info.argsz < minsz)
		return -EINVAL;

	/* PASID and address granu require additional info beyond minsz */
	if (inv_info.granularity == IOMMU_INV_GRANU_PASID &&
	    inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.pasid_info))
		return -EINVAL;

	if (inv_info.granularity == IOMMU_INV_GRANU_ADDR &&
	    inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.addr_info))
		return -EINVAL;

	/*
	 * User might be using a newer UAPI header which has a larger data
	 * size, we shall support the existing flags within the current
	 * size. Copy the remaining user data _after_ minsz but not more
	 * than the current kernel supported size.
	 */
	if (copy_from_user((void *)&inv_info + minsz, uinfo + minsz,
			   min_t(u32, inv_info.argsz, sizeof(inv_info)) - minsz))
		return -EFAULT;

	/* Now the argsz is validated, check the content */
	ret = iommu_check_cache_invl_data(&inv_info);
	if (ret)
		return ret;

	return domain->ops->cache_invalidate(domain, dev, &inv_info);
}
EXPORT_SYMBOL_GPL(iommu_uapi_cache_invalidate);

static int iommu_check_bind_data(struct iommu_gpasid_bind_data *data)
{
	u64 mask;
	int i;

	if (data->version != IOMMU_GPASID_BIND_VERSION_1)
		return -EINVAL;

	/* Check the range of supported formats */
	if (data->format >= IOMMU_PASID_FORMAT_LAST)
		return -EINVAL;

	/* Check all flags */
	mask = IOMMU_SVA_GPASID_VAL;
	if (data->flags & ~mask)
		return -EINVAL;

	/* Check reserved padding fields */
	for (i = 0; i < sizeof(data->padding); i++) {
		if (data->padding[i])
			return -EINVAL;
	}

	return 0;
}

static int iommu_sva_prepare_bind_data(void __user *udata,
				       struct iommu_gpasid_bind_data *data)
{
	u32 minsz;

	/*
	 * No new spaces can be added before the variable sized union, the
	 * minimum size is the offset to the union.
	 */
	minsz = offsetof(struct iommu_gpasid_bind_data, vendor);

	/* Copy minsz from user to get flags and argsz */
	if (copy_from_user(data, udata, minsz))
		return -EFAULT;

	/* Fields before the variable size union are mandatory */
	if (data->argsz < minsz)
		return -EINVAL;
	/*
	 * User might be using a newer UAPI header, we shall let IOMMU vendor
	 * driver decide on what size it needs. Since the guest PASID bind data
	 * can be vendor specific, larger argsz could be the result of extension
	 * for one vendor but it should not affect another vendor.
	 * Copy the remaining user data _after_ minsz
	 */
	if (copy_from_user((void *)data + minsz, udata + minsz,
			   min_t(u32, data->argsz, sizeof(*data)) - minsz))
		return -EFAULT;

	return iommu_check_bind_data(data);
}

int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, struct device *dev,
			       void __user *udata)
{
	struct iommu_gpasid_bind_data data = { 0 };
	int ret;

	if (unlikely(!domain->ops->sva_bind_gpasid))
		return -ENODEV;

	ret = iommu_sva_prepare_bind_data(udata, &data);
	if (ret)
		return ret;

	return domain->ops->sva_bind_gpasid(domain, dev, &data);
}
EXPORT_SYMBOL_GPL(iommu_uapi_sva_bind_gpasid);

int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
			    ioasid_t pasid)
{
	if (unlikely(!domain->ops->sva_unbind_gpasid))
		return -ENODEV;

	return domain->ops->sva_unbind_gpasid(dev, pasid);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid);

int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
				 void __user *udata)
{
	struct iommu_gpasid_bind_data data = { 0 };
	int ret;

	if (unlikely(!domain->ops->sva_bind_gpasid))
		return -ENODEV;

	ret = iommu_sva_prepare_bind_data(udata, &data);
	if (ret)
		return ret;

	return iommu_sva_unbind_gpasid(domain, dev, data.hpasid);
}
EXPORT_SYMBOL_GPL(iommu_uapi_sva_unbind_gpasid);

static void __iommu_detach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	if (iommu_is_attach_deferred(domain, dev))
		return;

	if (unlikely(domain->ops->detach_dev == NULL))
		return;

	domain->ops->detach_dev(domain, dev);
	trace_detach_device_from_domain(dev);
}

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		return;

	mutex_lock(&group->mutex);
	if (iommu_group_device_count(group) != 1) {
		WARN_ON(1);
		goto out_unlock;
	}

	__iommu_detach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	struct iommu_domain *domain;
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		return NULL;

	domain = group->domain;

	iommu_group_put(group);

	return domain;
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);

/*
 * For IOMMU_DOMAIN_DMA implementations which already provide their own
 * guarantees that the group and its default domain are valid and correct.
 */
struct iommu_domain *iommu_get_dma_domain(struct device *dev)
{
	return dev->iommu_group->default_domain;
}

/*
 * IOMMU groups are really the natural working unit of the IOMMU, but
 * the IOMMU API works on domains and devices.  Bridge that gap by
 * iterating over the devices in a group.  Ideally we'd have a single
 * device which represents the requestor ID of the group, but we also
 * allow IOMMU drivers to create policy defined minimum sets, where
 * the physical hardware may be able to distinguish members, but we
 * wish to group them at a higher level (ex. untrusted multi-function
 * PCI devices).  Thus we attach each device.
 */
static int iommu_group_do_attach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	return __iommu_attach_device(domain, dev);
}

static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group)
{
	int ret;

	if (group->default_domain && group->domain != group->default_domain)
		return -EBUSY;

	ret = __iommu_group_for_each_dev(group, domain,
					 iommu_group_do_attach_device);
	if (ret == 0)
		group->domain = domain;

	return ret;
}

int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_attach_group(domain, group);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_group);

static int iommu_group_do_detach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	__iommu_detach_device(domain, dev);

	return 0;
}

static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group)
{
	int ret;

	if (!group->default_domain) {
		__iommu_group_for_each_dev(group, domain,
					   iommu_group_do_detach_device);
		group->domain = NULL;
		return;
	}

	if (group->domain == group->default_domain)
		return;

	/* Detach by re-attaching to the default domain */
	ret = __iommu_group_for_each_dev(group, group->default_domain,
					 iommu_group_do_attach_device);
	if (ret != 0)
		WARN_ON(1);
	else
		group->domain = group->default_domain;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	mutex_lock(&group->mutex);
	__iommu_detach_group(domain, group);
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	if (domain->type == IOMMU_DOMAIN_IDENTITY)
		return iova;

	if (domain->type == IOMMU_DOMAIN_BLOCKED)
		return 0;

	return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
                           phys_addr_t paddr, size_t size, size_t *count)
{
        unsigned int pgsize_idx, pgsize_idx_next;
        unsigned long pgsizes;
        size_t offset, pgsize, pgsize_next;
        unsigned long addr_merge = paddr | iova;

        /* Page sizes supported by the hardware and small enough for @size */
        pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0);

        /* Constrain the page sizes further based on the maximum alignment */
        if (likely(addr_merge))
                pgsizes &= GENMASK(__ffs(addr_merge), 0);

        /* Make sure we have at least one suitable page size */
        BUG_ON(!pgsizes);

        /* Pick the biggest page size remaining */
        pgsize_idx = __fls(pgsizes);
        pgsize = BIT(pgsize_idx);
        if (!count)
                return pgsize;

        /* Find the next biggest supported page size, if it exists */
        pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
        if (!pgsizes)
                goto out_set_count;

        pgsize_idx_next = __ffs(pgsizes);
        pgsize_next = BIT(pgsize_idx_next);

        /*
         * There's no point trying a bigger page size unless the virtual
         * and physical addresses are similarly offset within the larger page.
         */
        if ((iova ^ paddr) & (pgsize_next - 1))
                goto out_set_count;

        /* Calculate the offset to the next page size alignment boundary */
        offset = pgsize_next - (addr_merge & (pgsize_next - 1));

        /*
         * If size is big enough to accommodate the larger page, reduce
         * the number of smaller pages.
         */
        if (offset + pgsize_next <= size)
                size = offset;

out_set_count:
        *count = size >> pgsize_idx;
        return pgsize;
}
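
/*
 * Illustrative sketch, not part of the API: exercising iommu_pgsize() above.
 * Assuming a hypothetical domain whose pgsize_bitmap advertises 4K, 2M and
 * 1G pages, mapping 6MB at 2MB-aligned iova/paddr should pick pgsize = 2M
 * with count = 3: the address alignment rules out anything larger than 2M,
 * and the size spans exactly three such pages.
 */
static void __maybe_unused iommu_pgsize_demo(struct iommu_domain *domain)
{
        size_t count;
        size_t pgsize = iommu_pgsize(domain, 0x200000, 0x200000,
                                     0x600000, &count);

        pr_debug("demo: pgsize 0x%zx count %zu\n", pgsize, count);
}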
static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
                             phys_addr_t paddr, size_t size, int prot,
                             gfp_t gfp, size_t *mapped)
{
        const struct iommu_ops *ops = domain->ops;
        size_t pgsize, count;
        int ret;

        pgsize = iommu_pgsize(domain, iova, paddr, size, &count);

        pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n",
                 iova, &paddr, pgsize, count);

        if (ops->map_pages) {
                ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot,
                                     gfp, mapped);
        } else {
                ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
                *mapped = ret ? 0 : pgsize;
        }

        return ret;
}
static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
                       phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
        const struct iommu_ops *ops = domain->ops;
        unsigned long orig_iova = iova;
        unsigned int min_pagesz;
        size_t orig_size = size;
        phys_addr_t orig_paddr = paddr;
        int ret = 0;

        if (unlikely(!(ops->map || ops->map_pages) ||
                     domain->pgsize_bitmap == 0UL))
                return -ENODEV;

        if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
                return -EINVAL;

        /* find out the minimum page size supported */
        min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

        /*
         * both the virtual address and the physical one, as well as
         * the size of the mapping, must be aligned (at least) to the
         * size of the smallest page supported by the hardware
         */
        if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
                pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
                       iova, &paddr, size, min_pagesz);
                return -EINVAL;
        }

        pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);

        while (size) {
                size_t mapped = 0;

                ret = __iommu_map_pages(domain, iova, paddr, size, prot, gfp,
                                        &mapped);
                /*
                 * Some pages may have been mapped, even if an error occurred,
                 * so we should account for those so they can be unmapped.
                 */
                size -= mapped;

                if (ret)
                        break;

                iova += mapped;
                paddr += mapped;
        }

        /* unroll mapping in case something went wrong */
        if (ret)
                iommu_unmap(domain, orig_iova, orig_size - size);
        else
                trace_map(orig_iova, orig_paddr, orig_size);

        return ret;
}
static int _iommu_map(struct iommu_domain *domain, unsigned long iova,
                      phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
        const struct iommu_ops *ops = domain->ops;
        int ret;

        ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
        if (ret == 0 && ops->iotlb_sync_map)
                ops->iotlb_sync_map(domain, iova, size);

        return ret;
}
int iommu_map(struct iommu_domain *domain, unsigned long iova,
              phys_addr_t paddr, size_t size, int prot)
{
        might_sleep();
        return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(iommu_map);

int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
                     phys_addr_t paddr, size_t size, int prot)
{
        return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(iommu_map_atomic);
static size_t __iommu_unmap_pages(struct iommu_domain *domain,
                                  unsigned long iova, size_t size,
                                  struct iommu_iotlb_gather *iotlb_gather)
{
        const struct iommu_ops *ops = domain->ops;
        size_t pgsize, count;

        pgsize = iommu_pgsize(domain, iova, iova, size, &count);
        return ops->unmap_pages ?
               ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather) :
               ops->unmap(domain, iova, pgsize, iotlb_gather);
}
static size_t __iommu_unmap(struct iommu_domain *domain,
                            unsigned long iova, size_t size,
                            struct iommu_iotlb_gather *iotlb_gather)
{
        const struct iommu_ops *ops = domain->ops;
        size_t unmapped_page, unmapped = 0;
        unsigned long orig_iova = iova;
        unsigned int min_pagesz;

        if (unlikely(!(ops->unmap || ops->unmap_pages) ||
                     domain->pgsize_bitmap == 0UL))
                return 0;

        if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
                return 0;

        /* find out the minimum page size supported */
        min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

        /*
         * The virtual address, as well as the size of the mapping, must be
         * aligned (at least) to the size of the smallest page supported
         * by the hardware
         */
        if (!IS_ALIGNED(iova | size, min_pagesz)) {
                pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
                       iova, size, min_pagesz);
                return 0;
        }

        pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);

        /*
         * Keep iterating until we either unmap 'size' bytes (or more)
         * or we hit an area that isn't mapped.
         */
        while (unmapped < size) {
                unmapped_page = __iommu_unmap_pages(domain, iova,
                                                    size - unmapped,
                                                    iotlb_gather);
                if (!unmapped_page)
                        break;

                pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
                         iova, unmapped_page);

                iova += unmapped_page;
                unmapped += unmapped_page;
        }

        trace_unmap(orig_iova, size, unmapped);
        return unmapped;
}
size_t iommu_unmap(struct iommu_domain *domain,
                   unsigned long iova, size_t size)
{
        struct iommu_iotlb_gather iotlb_gather;
        size_t ret;

        iommu_iotlb_gather_init(&iotlb_gather);
        ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
        iommu_iotlb_sync(domain, &iotlb_gather);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_unmap);
size_t iommu_unmap_fast(struct iommu_domain *domain,
                        unsigned long iova, size_t size,
                        struct iommu_iotlb_gather *iotlb_gather)
{
        return __iommu_unmap(domain, iova, size, iotlb_gather);
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);
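
/*
 * Illustrative sketch, not part of this file: a minimal map/translate/unmap
 * round trip on a paging domain. The function name and the fixed IOVA are
 * hypothetical; the prot flags and calls are the real exported interfaces.
 */
static int __maybe_unused iommu_map_demo(struct iommu_domain *domain,
                                         phys_addr_t paddr, size_t size)
{
        unsigned long iova = 0x100000; /* hypothetical caller-managed IOVA */
        int ret;

        ret = iommu_map(domain, iova, paddr, size, IOMMU_READ | IOMMU_WRITE);
        if (ret)
                return ret;

        /* the translation installed above should now be visible */
        WARN_ON(iommu_iova_to_phys(domain, iova) != paddr);

        iommu_unmap(domain, iova, size);
        return 0;
}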
static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
                              struct scatterlist *sg, unsigned int nents,
                              int prot, gfp_t gfp)
{
        const struct iommu_ops *ops = domain->ops;
        size_t len = 0, mapped = 0;
        phys_addr_t start;
        unsigned int i = 0;
        int ret;

        while (i <= nents) {
                phys_addr_t s_phys = sg_phys(sg);

                if (len && s_phys != start + len) {
                        ret = __iommu_map(domain, iova + mapped, start,
                                          len, prot, gfp);
                        if (ret)
                                goto out_err;

                        mapped += len;
                        len = 0;
                }

                if (len) {
                        len += sg->length;
                } else {
                        len = sg->length;
                        start = s_phys;
                }

                if (++i < nents)
                        sg = sg_next(sg);
        }

        if (ops->iotlb_sync_map)
                ops->iotlb_sync_map(domain, iova, mapped);
        return mapped;

out_err:
        /* undo mappings already done */
        iommu_unmap(domain, iova, mapped);

        return ret;
}
ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
                     struct scatterlist *sg, unsigned int nents, int prot)
{
        might_sleep();
        return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(iommu_map_sg);

ssize_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
                            struct scatterlist *sg, unsigned int nents,
                            int prot)
{
        return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(iommu_map_sg_atomic);
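
/*
 * Illustrative sketch, not part of this file: building a two-entry
 * scatterlist and mapping it contiguously at @iova. The function name is
 * hypothetical; sg_init_table()/sg_set_page() and iommu_map_sg() are the
 * real interfaces.
 */
static ssize_t __maybe_unused iommu_map_sg_demo(struct iommu_domain *domain,
                                                unsigned long iova,
                                                struct page *p0,
                                                struct page *p1)
{
        struct scatterlist sg[2];

        sg_init_table(sg, 2);
        sg_set_page(&sg[0], p0, PAGE_SIZE, 0);
        sg_set_page(&sg[1], p1, PAGE_SIZE, 0);

        /* returns the number of bytes mapped, or a negative errno */
        return iommu_map_sg(domain, iova, sg, 2, IOMMU_READ | IOMMU_WRITE);
}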
 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
 * @domain: the iommu domain where the fault has happened
 * @dev: the device where the fault has happened
 * @iova: the faulting address
 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
 *
 * This function should be called by the low-level IOMMU implementations
 * whenever IOMMU faults happen, to allow high-level users that are
 * interested in such events to know about them.
 *
 * This event may be useful for several possible use cases:
 * - mere logging of the event
 * - dynamic TLB/PTE loading
 * - if restarting of the faulting device is required
 *
 * Returns 0 on success and an appropriate error code otherwise (if dynamic
 * PTE/TLB loading will one day be supported, implementations will be able
 * to tell whether it succeeded or not according to this return value).
 *
 * Specifically, -ENOSYS is returned if a fault handler isn't installed
 * (though fault handlers can also return -ENOSYS, in case they want to
 * elicit the default behavior of the IOMMU drivers).
 */
int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
                       unsigned long iova, int flags)
{
        int ret = -ENOSYS;

        /*
         * if upper layers showed interest and installed a fault handler,
         * invoke it.
         */
        if (domain->handler)
                ret = domain->handler(domain, dev, iova, flags,
                                      domain->handler_token);

        trace_io_page_fault(dev, iova, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(report_iommu_fault);
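
/*
 * Illustrative sketch, not part of this file: how a higher-level user might
 * install a handler that report_iommu_fault() above will invoke. The
 * function names are hypothetical; iommu_set_fault_handler() is the real
 * interface for installing it.
 */
static int __maybe_unused demo_fault_handler(struct iommu_domain *domain,
                                             struct device *dev,
                                             unsigned long iova, int flags,
                                             void *token)
{
        dev_warn(dev, "demo: %s fault at iova 0x%lx\n",
                 (flags & IOMMU_FAULT_WRITE) ? "write" : "read", iova);

        /* fall back to the IOMMU driver's default behavior */
        return -ENOSYS;
}

static void __maybe_unused demo_install_fault_handler(struct iommu_domain *domain)
{
        iommu_set_fault_handler(domain, demo_fault_handler, NULL);
}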
static int __init iommu_init(void)
{
        iommu_group_kset = kset_create_and_add("iommu_groups",
                                               NULL, kernel_kobj);
        BUG_ON(!iommu_group_kset);

        iommu_debugfs_setup();

        return 0;
}
core_initcall(iommu_init);
int iommu_enable_nesting(struct iommu_domain *domain)
{
        if (domain->type != IOMMU_DOMAIN_UNMANAGED)
                return -EINVAL;
        if (!domain->ops->enable_nesting)
                return -EINVAL;
        return domain->ops->enable_nesting(domain);
}
EXPORT_SYMBOL_GPL(iommu_enable_nesting);
int iommu_set_pgtable_quirks(struct iommu_domain *domain,
                             unsigned long quirk)
{
        if (domain->type != IOMMU_DOMAIN_UNMANAGED)
                return -EINVAL;
        if (!domain->ops->set_pgtable_quirks)
                return -EINVAL;
        return domain->ops->set_pgtable_quirks(domain, quirk);
}
EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks);
void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
        const struct iommu_ops *ops = dev->bus->iommu_ops;

        if (ops && ops->get_resv_regions)
                ops->get_resv_regions(dev, list);
}

void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
        const struct iommu_ops *ops = dev->bus->iommu_ops;

        if (ops && ops->put_resv_regions)
                ops->put_resv_regions(dev, list);
}

/**
 * generic_iommu_put_resv_regions - Reserved region driver helper
 * @dev: device for which to free reserved regions
 * @list: reserved region list for device
 *
 * IOMMU drivers can use this to implement their .put_resv_regions() callback
 * for simple reservations. Memory allocated for each reserved region will be
 * freed. If an IOMMU driver allocates additional resources per region, it is
 * going to have to implement a custom callback.
 */
void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
        struct iommu_resv_region *entry, *next;

        list_for_each_entry_safe(entry, next, list, list)
                kfree(entry);
}
EXPORT_SYMBOL(generic_iommu_put_resv_regions);
struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
                                                  size_t length, int prot,
                                                  enum iommu_resv_type type)
{
        struct iommu_resv_region *region;

        region = kzalloc(sizeof(*region), GFP_KERNEL);
        if (!region)
                return NULL;

        INIT_LIST_HEAD(&region->list);
        region->start = start;
        region->length = length;
        region->prot = prot;
        region->type = type;
        return region;
}
EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);
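
/*
 * Illustrative sketch, not part of this file: how an IOMMU driver's
 * .get_resv_regions() callback might use iommu_alloc_resv_region(). The
 * function name is hypothetical; the address range mirrors the typical x86
 * MSI doorbell window and is only an assumed example.
 */
static void __maybe_unused demo_get_resv_regions(struct device *dev,
                                                 struct list_head *head)
{
        struct iommu_resv_region *region;

        region = iommu_alloc_resv_region(0xfee00000, 0x100000,
                                         IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO,
                                         IOMMU_RESV_MSI);
        if (!region)
                return;

        list_add_tail(&region->list, head);
}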
void iommu_set_default_passthrough(bool cmd_line)
{
        if (cmd_line)
                iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
        iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
}

void iommu_set_default_translated(bool cmd_line)
{
        if (cmd_line)
                iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
        iommu_def_domain_type = IOMMU_DOMAIN_DMA;
}

bool iommu_default_passthrough(void)
{
        return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY;
}
EXPORT_SYMBOL_GPL(iommu_default_passthrough);
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
        const struct iommu_ops *ops = NULL;
        struct iommu_device *iommu;

        spin_lock(&iommu_device_lock);
        list_for_each_entry(iommu, &iommu_device_list, list)
                if (iommu->fwnode == fwnode) {
                        ops = iommu->ops;
                        break;
                }
        spin_unlock(&iommu_device_lock);
        return ops;
}
int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
                      const struct iommu_ops *ops)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

        if (fwspec)
                return ops == fwspec->ops ? 0 : -EINVAL;

        if (!dev_iommu_get(dev))
                return -ENOMEM;

        /* Preallocate for the overwhelmingly common case of 1 ID */
        fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL);
        if (!fwspec)
                return -ENOMEM;

        of_node_get(to_of_node(iommu_fwnode));
        fwspec->iommu_fwnode = iommu_fwnode;
        fwspec->ops = ops;
        dev_iommu_fwspec_set(dev, fwspec);
        return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_init);
void iommu_fwspec_free(struct device *dev)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

        if (fwspec) {
                fwnode_handle_put(fwspec->iommu_fwnode);
                kfree(fwspec);
                dev_iommu_fwspec_set(dev, NULL);
        }
}
EXPORT_SYMBOL_GPL(iommu_fwspec_free);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        int i, new_num;

        if (!fwspec)
                return -EINVAL;

        new_num = fwspec->num_ids + num_ids;
        if (new_num > 1) {
                fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num),
                                  GFP_KERNEL);
                if (!fwspec)
                        return -ENOMEM;

                dev_iommu_fwspec_set(dev, fwspec);
        }

        for (i = 0; i < num_ids; i++)
                fwspec->ids[fwspec->num_ids + i] = ids[i];

        fwspec->num_ids = new_num;
        return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
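
/*
 * Illustrative sketch, not part of this file: the order in which a firmware
 * layer (OF/ACPI glue) might build a device's fwspec with the two helpers
 * above. The function name and the stream ID value are hypothetical.
 */
static int __maybe_unused demo_fwspec_setup(struct device *dev,
                                            struct fwnode_handle *iommu_fwnode,
                                            const struct iommu_ops *ops)
{
        u32 sid = 0x42; /* hypothetical stream ID from firmware tables */
        int ret;

        ret = iommu_fwspec_init(dev, iommu_fwnode, ops);
        if (ret)
                return ret;

        return iommu_fwspec_add_ids(dev, &sid, 1);
}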
 * Per device IOMMU features.
 */
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
        if (dev->iommu && dev->iommu->iommu_dev) {
                const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;

                if (ops->dev_enable_feat)
                        return ops->dev_enable_feat(dev, feat);
        }

        return -ENODEV;
}
EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
 * The device drivers should do the necessary cleanups before calling this.
 * For example, before disabling the aux-domain feature, the device driver
 * should detach all aux-domains. Otherwise, this will return -EBUSY.
 */
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
        if (dev->iommu && dev->iommu->iommu_dev) {
                const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;

                if (ops->dev_disable_feat)
                        return ops->dev_disable_feat(dev, feat);
        }

        return -EBUSY;
}
EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
{
        if (dev->iommu && dev->iommu->iommu_dev) {
                const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;

                if (ops->dev_feat_enabled)
                        return ops->dev_feat_enabled(dev, feat);
        }

        return false;
}
EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);
 * Aux-domain specific attach/detach.
 *
 * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
 * true. Also, as long as domains are attached to a device through this
 * interface, any attempt to call iommu_attach_device() should fail
 * (iommu_detach_device() can't fail, so we fail when trying to re-attach).
 * This should make us safe against a device being attached to a guest as a
 * whole while there are still pasid users on it (aux and sva).
 */
int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
{
        int ret = -ENODEV;

        if (domain->ops->aux_attach_dev)
                ret = domain->ops->aux_attach_dev(domain, dev);

        if (!ret)
                trace_attach_device_to_domain(dev);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_aux_attach_device);
void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
{
        if (domain->ops->aux_detach_dev) {
                domain->ops->aux_detach_dev(domain, dev);
                trace_detach_device_from_domain(dev);
        }
}
EXPORT_SYMBOL_GPL(iommu_aux_detach_device);
int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
{
        int ret = -ENODEV;

        if (domain->ops->aux_get_pasid)
                ret = domain->ops->aux_get_pasid(domain, dev);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
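
/*
 * Illustrative sketch, not part of this file: the expected aux-domain call
 * sequence for a driver that wants a PASID-tagged private address space.
 * The function name is hypothetical; the calls are the exported helpers
 * above plus iommu_domain_alloc()/iommu_domain_free().
 */
static int __maybe_unused iommu_aux_domain_demo(struct device *dev)
{
        struct iommu_domain *domain;
        int pasid, ret;

        if (!iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
                return -ENODEV;

        domain = iommu_domain_alloc(dev->bus);
        if (!domain)
                return -ENOMEM;

        ret = iommu_aux_attach_device(domain, dev);
        if (ret) {
                iommu_domain_free(domain);
                return ret;
        }

        pasid = iommu_aux_get_pasid(domain, dev);
        dev_info(dev, "demo: aux domain bound to PASID %d\n", pasid);

        iommu_aux_detach_device(domain, dev);
        iommu_domain_free(domain);
        return 0;
}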
 * iommu_sva_bind_device() - Bind a process address space to a device
 * @dev: the device
 * @mm: the mm to bind, caller must hold a reference to it
 * @drvdata: opaque data pointer to pass to the bind callback
 *
 * Create a bond between device and address space, allowing the device to access
 * the mm using the returned PASID. If a bond already exists between @dev and
 * @mm, it is returned and an additional reference is taken. Caller must call
 * iommu_sva_unbind_device() to release each reference.
 *
 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
 * initialize the required SVA features.
 *
 * On error, returns an ERR_PTR value.
 */
struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
{
        struct iommu_group *group;
        struct iommu_sva *handle = ERR_PTR(-EINVAL);
        const struct iommu_ops *ops = dev->bus->iommu_ops;

        if (!ops || !ops->sva_bind)
                return ERR_PTR(-ENODEV);

        group = iommu_group_get(dev);
        if (!group)
                return ERR_PTR(-ENODEV);

        /* Ensure device count and domain don't change while we're binding */
        mutex_lock(&group->mutex);

        /*
         * To keep things simple, SVA currently doesn't support IOMMU groups
         * with more than one device. Existing SVA-capable systems are not
         * affected by the problems that required IOMMU groups (lack of ACS
         * isolation, device ID aliasing and other hardware issues).
         */
        if (iommu_group_device_count(group) != 1)
                goto out_unlock;

        handle = ops->sva_bind(dev, mm, drvdata);

out_unlock:
        mutex_unlock(&group->mutex);
        iommu_group_put(group);

        return handle;
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
 * @handle: the handle returned by iommu_sva_bind_device()
 *
 * Put reference to a bond between device and address space. The device should
 * not be issuing any more transactions for this PASID. All outstanding page
 * requests for this PASID must have been flushed to the IOMMU.
 */
void iommu_sva_unbind_device(struct iommu_sva *handle)
{
        struct iommu_group *group;
        struct device *dev = handle->dev;
        const struct iommu_ops *ops = dev->bus->iommu_ops;

        if (!ops || !ops->sva_unbind)
                return;

        group = iommu_group_get(dev);
        if (!group)
                return;

        mutex_lock(&group->mutex);
        ops->sva_unbind(handle);
        mutex_unlock(&group->mutex);

        iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
        const struct iommu_ops *ops = handle->dev->bus->iommu_ops;

        if (!ops || !ops->sva_get_pasid)
                return IOMMU_PASID_INVALID;

        return ops->sva_get_pasid(handle);
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
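
/*
 * Illustrative sketch, not part of this file: the bind/use/unbind sequence
 * the kernel-doc above describes, from the point of view of a SVA-capable
 * device driver. The function name is hypothetical; the calls are the
 * exported SVA interfaces above.
 */
static int __maybe_unused iommu_sva_demo(struct device *dev,
                                         struct mm_struct *mm)
{
        struct iommu_sva *handle;
        u32 pasid;

        if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA))
                return -ENODEV;

        handle = iommu_sva_bind_device(dev, mm, NULL);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        pasid = iommu_sva_get_pasid(handle);
        dev_info(dev, "demo: bound mm to PASID %u\n", pasid);

        /* ... program the device to issue transactions with @pasid ... */

        iommu_sva_unbind_device(handle);
        return 0;
}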
 * Changes the default domain of an iommu group that has *only* one device.
 *
 * @group: The group for which the default domain should be changed
 * @prev_dev: The device in the group (this is used to make sure that the device
 *     hasn't changed after the caller has called this function)
 * @type: The type of the new default domain that gets associated with the group
 *
 * Returns 0 on success and error code on failure
 *
 * Note:
 * 1. Presently, this function is called only when user requests to change the
 *    group's default domain type through /sys/kernel/iommu_groups/<grp_id>/type
 *    Please take a closer look if intended to use for other purposes.
 */
static int iommu_change_dev_def_domain(struct iommu_group *group,
                                       struct device *prev_dev, int type)
{
        struct iommu_domain *prev_dom;
        struct group_device *grp_dev;
        int ret, dev_def_dom;
        struct device *dev;

        mutex_lock(&group->mutex);

        if (group->default_domain != group->domain) {
                dev_err_ratelimited(prev_dev, "Group not assigned to default domain\n");
                ret = -EBUSY;
                goto out;
        }

        /*
         * iommu group wasn't locked while acquiring device lock in
         * iommu_group_store_type(). So, make sure that the device count hasn't
         * changed while acquiring device lock.
         *
         * Changing default domain of an iommu group with two or more devices
         * isn't supported because there could be a potential deadlock. Consider
         * the following scenario. T1 is trying to acquire device locks of all
         * the devices in the group and before it could acquire all of them,
         * there could be another thread T2 (from different sub-system and use
         * case) that has already acquired some of the device locks and might be
         * waiting for T1 to release other device locks.
         */
        if (iommu_group_device_count(group) != 1) {
                dev_err_ratelimited(prev_dev, "Cannot change default domain: Group has more than one device\n");
                ret = -EINVAL;
                goto out;
        }

        /* Since group has only one device */
        grp_dev = list_first_entry(&group->devices, struct group_device, list);
        dev = grp_dev->dev;

        if (prev_dev != dev) {
                dev_err_ratelimited(prev_dev, "Cannot change default domain: Device has been changed\n");
                ret = -EBUSY;
                goto out;
        }

        prev_dom = group->default_domain;
        if (!prev_dom) {
                ret = -EINVAL;
                goto out;
        }

        dev_def_dom = iommu_get_def_domain_type(dev);
        if (!type) {
                /*
                 * If the user hasn't requested any specific type of domain and
                 * if the device supports both the domains, then default to the
                 * domain the device was booted with
                 */
                type = dev_def_dom ? : iommu_def_domain_type;
        } else if (dev_def_dom && type != dev_def_dom) {
                dev_err_ratelimited(prev_dev, "Device cannot be in %s domain\n",
                                    iommu_domain_type_str(type));
                ret = -EINVAL;
                goto out;
        }

        /*
         * Switch to a new domain only if the requested domain type is different
         * from the existing default domain type
         */
        if (prev_dom->type == type) {
                ret = 0;
                goto out;
        }

        /* We can bring up a flush queue without tearing down the domain */
        if (type == IOMMU_DOMAIN_DMA_FQ && prev_dom->type == IOMMU_DOMAIN_DMA) {
                ret = iommu_dma_init_fq(prev_dom);
                if (!ret)
                        prev_dom->type = IOMMU_DOMAIN_DMA_FQ;
                goto out;
        }

        /* Sets group->default_domain to the newly allocated domain */
        ret = iommu_group_alloc_default_domain(dev->bus, group, type);
        if (ret)
                goto out;

        ret = iommu_create_device_direct_mappings(group, dev);
        if (ret)
                goto free_new_domain;

        ret = __iommu_attach_device(group->default_domain, dev);
        if (ret)
                goto free_new_domain;

        group->domain = group->default_domain;

        /*
         * Release the mutex here because ops->probe_finalize() call-back of
         * some vendor IOMMU drivers calls arm_iommu_attach_device() which
         * in-turn might call back into IOMMU core code, where it tries to take
         * group->mutex, resulting in a deadlock.
         */
        mutex_unlock(&group->mutex);

        /* Make sure dma_ops is appropriately set */
        iommu_group_do_probe_finalize(dev, group->default_domain);
        iommu_domain_free(prev_dom);
        return 0;

free_new_domain:
        iommu_domain_free(group->default_domain);
        group->default_domain = prev_dom;
        group->domain = prev_dom;

out:
        mutex_unlock(&group->mutex);

        return ret;
}
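
/*
 * Illustrative note, not from the original source: the only current caller
 * is the sysfs store below, so this transition is driven from user space,
 * e.g. (assuming group 0 and an otherwise unbound device):
 *
 *   echo DMA-FQ > /sys/kernel/iommu_groups/0/type
 */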
 * Changing the default domain through sysfs requires the users to unbind the
 * drivers from the devices in the iommu group, except for a DMA -> DMA-FQ
 * transition. Return failure if this isn't met.
 *
 * We need to consider the race between this and the device release path.
 * device_lock(dev) is used here to guarantee that the device release path
 * will not be entered at the same time.
 */
static ssize_t iommu_group_store_type(struct iommu_group *group,
                                      const char *buf, size_t count)
{
        struct group_device *grp_dev;
        struct device *dev;
        int ret, req_type;

        if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
                return -EACCES;

        if (WARN_ON(!group))
                return -EINVAL;

        if (sysfs_streq(buf, "identity"))
                req_type = IOMMU_DOMAIN_IDENTITY;
        else if (sysfs_streq(buf, "DMA"))
                req_type = IOMMU_DOMAIN_DMA;
        else if (sysfs_streq(buf, "DMA-FQ"))
                req_type = IOMMU_DOMAIN_DMA_FQ;
        else if (sysfs_streq(buf, "auto"))
                req_type = 0;
        else
                return -EINVAL;

        /*
         * Lock/Unlock the group mutex here before device lock to
         * 1. Make sure that the iommu group has only one device (this is a
         *    prerequisite for step 2)
         * 2. Get struct *dev which is needed to lock device
         */
        mutex_lock(&group->mutex);
        if (iommu_group_device_count(group) != 1) {
                mutex_unlock(&group->mutex);
                pr_err_ratelimited("Cannot change default domain: Group has more than one device\n");
                return -EINVAL;
        }

        /* Since group has only one device */
        grp_dev = list_first_entry(&group->devices, struct group_device, list);
        dev = grp_dev->dev;
        get_device(dev);

        /*
         * Don't hold the group mutex because taking group mutex first and then
         * the device lock could potentially cause a deadlock as below. Assume
         * two threads T1 and T2. T1 is trying to change default domain of an
         * iommu group and T2 is trying to hot unplug a device or release [1] VF
         * of a PCIe device which is in the same iommu group. T1 takes group
         * mutex and before it could take device lock assume T2 has taken device
         * lock and is yet to take group mutex. Now, both the threads will be
         * waiting for the other thread to release lock. Below is the suggested
         * lock order:
         * device_lock(dev);
         *      mutex_lock(&group->mutex);
         *              iommu_change_dev_def_domain();
         *      mutex_unlock(&group->mutex);
         * device_unlock(dev);
         *
         * [1] Typical device release path
         * device_lock() from device/driver core code
         *  -> bus_notifier()
         *   -> iommu_bus_notifier()
         *    -> iommu_release_device()
         *     -> ops->release_device() vendor driver calls back iommu core code
         *      -> mutex_lock() from iommu core code
         */
        mutex_unlock(&group->mutex);

        /* Check if the device in the group still has a driver bound to it */
        device_lock(dev);
        if (device_is_bound(dev) && !(req_type == IOMMU_DOMAIN_DMA_FQ &&
            group->default_domain->type == IOMMU_DOMAIN_DMA)) {
                pr_err_ratelimited("Device is still bound to driver\n");
                ret = -EBUSY;
                goto out;
        }

        ret = iommu_change_dev_def_domain(group, dev, req_type);