/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#define pr_fmt(fmt)    "iommu: " fmt

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/property.h>
#include <trace/events/iommu.h>

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);

struct iommu_callback_data {
        const struct iommu_ops *ops;
};

struct iommu_group {
        struct kobject kobj;
        struct kobject *devices_kobj;
        struct list_head devices;
        struct mutex mutex;
        struct blocking_notifier_head notifier;
        void *iommu_data;
        void (*iommu_data_release)(void *iommu_data);
        char *name;
        int id;
        struct iommu_domain *default_domain;
        struct iommu_domain *domain;
};

struct group_device {
        struct list_head list;
        struct device *dev;
        char *name;
};

struct iommu_group_attribute {
        struct attribute attr;
        ssize_t (*show)(struct iommu_group *group, char *buf);
        ssize_t (*store)(struct iommu_group *group,
                         const char *buf, size_t count);
};

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)           \
struct iommu_group_attribute iommu_group_attr_##_name =         \
        __ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)      \
        container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)           \
        container_of(_kobj, struct iommu_group, kobj)

static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);

int iommu_device_register(struct iommu_device *iommu)
{
        spin_lock(&iommu_device_lock);
        list_add_tail(&iommu->list, &iommu_device_list);
        spin_unlock(&iommu_device_lock);

        return 0;
}

void iommu_device_unregister(struct iommu_device *iommu)
{
        spin_lock(&iommu_device_lock);
        list_del(&iommu->list);
        spin_unlock(&iommu_device_lock);
}

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
                                                 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
                                 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
                                struct iommu_group *group);
static void __iommu_detach_group(struct iommu_domain *domain,
                                 struct iommu_group *group);

static ssize_t iommu_group_attr_show(struct kobject *kobj,
                                     struct attribute *__attr, char *buf)
{
        struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
        struct iommu_group *group = to_iommu_group(kobj);
        ssize_t ret = -EIO;

        if (attr->show)
                ret = attr->show(group, buf);
        return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
                                      struct attribute *__attr,
                                      const char *buf, size_t count)
{
        struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
        struct iommu_group *group = to_iommu_group(kobj);
        ssize_t ret = -EIO;

        if (attr->store)
                ret = attr->store(group, buf, count);
        return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
        .show = iommu_group_attr_show,
        .store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
                                   struct iommu_group_attribute *attr)
{
        return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
                                    struct iommu_group_attribute *attr)
{
        sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
        return sprintf(buf, "%s\n", group->name);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static void iommu_group_release(struct kobject *kobj)
{
        struct iommu_group *group = to_iommu_group(kobj);

        pr_debug("Releasing group %d\n", group->id);

        if (group->iommu_data_release)
                group->iommu_data_release(group->iommu_data);

        ida_simple_remove(&iommu_group_ida, group->id);

        if (group->default_domain)
                iommu_domain_free(group->default_domain);

        kfree(group->name);
        kfree(group);
}

static struct kobj_type iommu_group_ktype = {
        .sysfs_ops = &iommu_group_sysfs_ops,
        .release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group.  The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added.  Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
        struct iommu_group *group;
        int ret;

        group = kzalloc(sizeof(*group), GFP_KERNEL);
        if (!group)
                return ERR_PTR(-ENOMEM);

        group->kobj.kset = iommu_group_kset;
        mutex_init(&group->mutex);
        INIT_LIST_HEAD(&group->devices);
        BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

        ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
        if (ret < 0) {
                kfree(group);
                return ERR_PTR(ret);
        }
        group->id = ret;

        ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
                                   NULL, "%d", group->id);
        if (ret) {
                ida_simple_remove(&iommu_group_ida, group->id);
                kfree(group);
                return ERR_PTR(ret);
        }

        group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
        if (!group->devices_kobj) {
                kobject_put(&group->kobj); /* triggers .release & free */
                return ERR_PTR(-ENOMEM);
        }

        /*
         * The devices_kobj holds a reference on the group kobject, so
         * as long as that exists so will the group.  We can therefore
         * use the devices_kobj for reference counting.
         */
        kobject_put(&group->kobj);

        pr_debug("Allocated group %d\n", group->id);

        return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);

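/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * driver might combine the group calls above roughly like this.  The
 * foo_* names are made up for illustration.
 *
 *      static int foo_add_device(struct device *dev)
 *      {
 *              struct iommu_group *group;
 *              int ret;
 *
 *              group = iommu_group_alloc();
 *              if (IS_ERR(group))
 *                      return PTR_ERR(group);
 *
 *              iommu_group_set_name(group, "foo-group");
 *              ret = iommu_group_add_device(group, dev);
 *              iommu_group_put(group);
 *              return ret;
 *      }
 *
 * The final iommu_group_put() drops only the allocation reference; the
 * group itself stays alive while it still contains the device.
 */
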
struct iommu_group *iommu_group_get_by_id(int id)
{
        struct kobject *group_kobj;
        struct iommu_group *group;
        const char *name;

        if (!iommu_group_kset)
                return NULL;

        name = kasprintf(GFP_KERNEL, "%d", id);
        if (!name)
                return NULL;

        group_kobj = kset_find_obj(iommu_group_kset, name);
        kfree(name);

        if (!group_kobj)
                return NULL;

        group = container_of(group_kobj, struct iommu_group, kobj);
        BUG_ON(group->id != id);

        kobject_get(group->devices_kobj);
        kobject_put(&group->kobj);

        return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to retrieve it.  Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
        return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to set the data after
 * the group has been allocated.  Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
                               void (*release)(void *iommu_data))
{
        group->iommu_data = iommu_data;
        group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);

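/*
 * Example (illustrative sketch, not part of this file): pairing the two
 * iommu_data helpers above with a kfree()-based release callback.  The
 * foo_* names are made up.
 *
 *      static void foo_release(void *iommu_data)
 *      {
 *              kfree(iommu_data);
 *      }
 *
 *      struct foo_data *fd = kzalloc(sizeof(*fd), GFP_KERNEL);
 *
 *      iommu_group_set_iommudata(group, fd, foo_release);
 *
 * Later, with a group reference held, the data can be fetched back with
 * fd = iommu_group_get_iommudata(group); the release callback runs when
 * the group's last reference is dropped.
 */
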
/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group.  When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
        int ret;

        if (group->name) {
                iommu_group_remove_file(group, &iommu_group_attr_name);
                kfree(group->name);
                group->name = NULL;
                if (!name)
                        return 0;
        }

        group->name = kstrdup(name, GFP_KERNEL);
        if (!group->name)
                return -ENOMEM;

        ret = iommu_group_create_file(group, &iommu_group_attr_name);
        if (ret) {
                kfree(group->name);
                group->name = NULL;
                return ret;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

static int iommu_group_create_direct_mappings(struct iommu_group *group,
                                              struct device *dev)
{
        struct iommu_domain *domain = group->default_domain;
        struct iommu_dm_region *entry;
        struct list_head mappings;
        unsigned long pg_size;
        int ret = 0;

        if (!domain || domain->type != IOMMU_DOMAIN_DMA)
                return 0;

        BUG_ON(!domain->pgsize_bitmap);

        pg_size = 1UL << __ffs(domain->pgsize_bitmap);
        INIT_LIST_HEAD(&mappings);

        iommu_get_dm_regions(dev, &mappings);

        /* We need to consider overlapping regions for different devices */
        list_for_each_entry(entry, &mappings, list) {
                dma_addr_t start, end, addr;

                if (domain->ops->apply_dm_region)
                        domain->ops->apply_dm_region(dev, domain, entry);

                start = ALIGN(entry->start, pg_size);
                end   = ALIGN(entry->start + entry->length, pg_size);

                for (addr = start; addr < end; addr += pg_size) {
                        phys_addr_t phys_addr;

                        phys_addr = iommu_iova_to_phys(domain, addr);
                        if (phys_addr)
                                continue;

                        ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
                        if (ret)
                                goto out;
                }
        }

out:
        iommu_put_dm_regions(dev, &mappings);

        return ret;
}

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group.  Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
        int ret, i = 0;
        struct group_device *device;

        device = kzalloc(sizeof(*device), GFP_KERNEL);
        if (!device)
                return -ENOMEM;

        device->dev = dev;

        ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
        if (ret)
                goto err_free_device;

        device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
        if (!device->name) {
                ret = -ENOMEM;
                goto err_remove_link;
        }

        ret = sysfs_create_link_nowarn(group->devices_kobj,
                                       &dev->kobj, device->name);
        if (ret) {
                if (ret == -EEXIST && i >= 0) {
                        /*
                         * Account for the slim chance of collision
                         * and append an instance to the name.
                         */
                        kfree(device->name);
                        device->name = kasprintf(GFP_KERNEL, "%s.%d",
                                                 kobject_name(&dev->kobj), i++);
                        goto rename;
                }
                goto err_free_name;
        }

        kobject_get(group->devices_kobj);

        dev->iommu_group = group;

        iommu_group_create_direct_mappings(group, dev);

        mutex_lock(&group->mutex);
        list_add_tail(&device->list, &group->devices);
        if (group->domain)
                ret = __iommu_attach_device(group->domain, dev);
        mutex_unlock(&group->mutex);
        if (ret)
                goto err_put_group;

        /* Notify any listeners about change to group. */
        blocking_notifier_call_chain(&group->notifier,
                                     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);

        trace_add_device_to_group(group->id, dev);

        pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);

        return 0;

err_put_group:
        mutex_lock(&group->mutex);
        list_del(&device->list);
        mutex_unlock(&group->mutex);
        dev->iommu_group = NULL;
        kobject_put(group->devices_kobj);
err_free_name:
        kfree(device->name);
err_remove_link:
        sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
        kfree(device);
        pr_err("Failed to add device %s to group %d: %d\n", dev_name(dev), group->id, ret);
        return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group.  This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
        struct iommu_group *group = dev->iommu_group;
        struct group_device *tmp_device, *device = NULL;

        pr_info("Removing device %s from group %d\n", dev_name(dev), group->id);

        /* Pre-notify listeners that a device is being removed. */
        blocking_notifier_call_chain(&group->notifier,
                                     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);

        mutex_lock(&group->mutex);
        list_for_each_entry(tmp_device, &group->devices, list) {
                if (tmp_device->dev == dev) {
                        device = tmp_device;
                        list_del(&device->list);
                        break;
                }
        }
        mutex_unlock(&group->mutex);

        if (!device)
                return;

        sysfs_remove_link(group->devices_kobj, device->name);
        sysfs_remove_link(&dev->kobj, "iommu_group");

        trace_remove_device_from_group(group->id, dev);

        kfree(device->name);
        kfree(device);
        dev->iommu_group = NULL;
        kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

static int iommu_group_device_count(struct iommu_group *group)
{
        struct group_device *entry;
        int ret = 0;

        list_for_each_entry(entry, &group->devices, list)
                ret++;

        return ret;
}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
                                      int (*fn)(struct device *, void *))
{
        struct group_device *device;
        int ret = 0;

        list_for_each_entry(device, &group->devices, list) {
                ret = fn(device->dev, data);
                if (ret)
                        break;
        }
        return ret;
}

int iommu_group_for_each_dev(struct iommu_group *group, void *data,
                             int (*fn)(struct device *, void *))
{
        int ret;

        mutex_lock(&group->mutex);
        ret = __iommu_group_for_each_dev(group, data, fn);
        mutex_unlock(&group->mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);

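/*
 * Example (illustrative sketch, not part of this file): a caller-supplied
 * callback for the iterator above; a non-zero return value stops the
 * iteration.  The foo_count_dev name is made up.
 *
 *      static int foo_count_dev(struct device *dev, void *data)
 *      {
 *              int *count = data;
 *
 *              (*count)++;
 *              return 0;
 *      }
 *
 *      int count = 0;
 *
 *      iommu_group_for_each_dev(group, &count, foo_count_dev);
 */
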
/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device.  If found, the group is returned and the group
 * reference is incremented, else NULL.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
        struct iommu_group *group = dev->iommu_group;

        if (group)
                kobject_get(group->devices_kobj);

        return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group.  Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
        kobject_get(group->devices_kobj);
        return group;
}
EXPORT_SYMBOL_GPL(iommu_group_ref_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group.  Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
        if (group)
                kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_group_register_notifier - Register a notifier for group changes
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * This function allows iommu group users to track changes in a group.
 * See include/linux/iommu.h for actions sent via this notifier.  Caller
 * should hold a reference to the group throughout notifier registration.
 */
int iommu_group_register_notifier(struct iommu_group *group,
                                  struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);

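/*
 * Example (illustrative sketch, not part of this file): a group user
 * tracking device additions via the notifier above.  The foo_* names
 * are made up.
 *
 *      static int foo_nb_cb(struct notifier_block *nb,
 *                           unsigned long action, void *data)
 *      {
 *              struct device *dev = data;
 *
 *              if (action == IOMMU_GROUP_NOTIFY_ADD_DEVICE)
 *                      dev_info(dev, "joined iommu group\n");
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block foo_nb = {
 *              .notifier_call = foo_nb_cb,
 *      };
 *
 *      iommu_group_register_notifier(group, &foo_nb);
 */
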
/**
 * iommu_group_unregister_notifier - Unregister a notifier
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * Unregister a previously registered group notifier block.
 */
int iommu_group_unregister_notifier(struct iommu_group *group,
                                    struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
        return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
                                               unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding.  This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as they pass through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups.  For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
                                                        unsigned long *devfns)
{
        struct pci_dev *tmp = NULL;
        struct iommu_group *group;

        if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
                return NULL;

        for_each_pci_dev(tmp) {
                if (tmp == pdev || tmp->bus != pdev->bus ||
                    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
                    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
                        continue;

                group = get_pci_alias_group(tmp, devfns);
                if (group) {
                        pci_dev_put(tmp);
                        return group;
                }
        }

        return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups.  DMA
 * aliases are only supported on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at PCIe
 * devices, and therefore only expect multiple slots on the root complex or
 * downstream switch ports).  It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop.  To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
                                               unsigned long *devfns)
{
        struct pci_dev *tmp = NULL;
        struct iommu_group *group;

        if (test_and_set_bit(pdev->devfn & 0xff, devfns))
                return NULL;

        group = iommu_group_get(&pdev->dev);
        if (group)
                return group;

        for_each_pci_dev(tmp) {
                if (tmp == pdev || tmp->bus != pdev->bus)
                        continue;

                /* We alias them or they alias us */
                if (pci_devs_are_dma_aliases(pdev, tmp)) {
                        group = get_pci_alias_group(tmp, devfns);
                        if (group) {
                                pci_dev_put(tmp);
                                return group;
                        }
                }

                group = get_pci_function_alias_group(tmp, devfns);
                if (group) {
                        pci_dev_put(tmp);
                        return group;
                }
        }

        return NULL;
}

struct group_for_pci_data {
        struct pci_dev *pdev;
        struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device.  Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
        struct group_for_pci_data *data = opaque;

        data->pdev = pdev;
        data->group = iommu_group_get(&pdev->dev);

        return data->group != NULL;
}

/*
 * Generic device_group call-back function.  It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
        struct iommu_group *group;

        group = iommu_group_alloc();
        if (IS_ERR(group))
                return NULL;

        return group;
}

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct group_for_pci_data data;
        struct pci_bus *bus;
        struct iommu_group *group = NULL;
        u64 devfns[4] = { 0 };

        if (WARN_ON(!dev_is_pci(dev)))
                return ERR_PTR(-EINVAL);

        /*
         * Find the upstream DMA alias for the device.  A device must not
         * be aliased due to topology in order to have its own IOMMU group.
         * If we find an alias along the way that already belongs to a
         * group, use it.
         */
        if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
                return data.group;

        pdev = data.pdev;

        /*
         * Continue upstream from the point of minimum IOMMU granularity
         * due to aliases to the point where devices are protected from
         * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
         * group, use it.
         */
        for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
                if (!bus->self)
                        continue;

                if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
                        break;

                pdev = bus->self;

                group = iommu_group_get(&pdev->dev);
                if (group)
                        return group;
        }

        /*
         * Look for existing groups on device aliases.  If we alias another
         * device or another device aliases us, use the same group.
         */
        group = get_pci_alias_group(pdev, (unsigned long *)devfns);
        if (group)
                return group;

        /*
         * Look for existing groups on non-isolated functions on the same
         * slot and aliases of those functions, if any.  No need to clear
         * the search bitmap, the tested devfns are still valid.
         */
        group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
        if (group)
                return group;

        /* No shared group found, allocate new */
        group = iommu_group_alloc();
        if (IS_ERR(group))
                return NULL;

        return group;
}

/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device.  On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device.  The reference should be released with iommu_group_put().
 */
struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
        const struct iommu_ops *ops = dev->bus->iommu_ops;
        struct iommu_group *group;
        int ret;

        group = iommu_group_get(dev);
        if (group)
                return group;

        group = ERR_PTR(-EINVAL);

        if (ops && ops->device_group)
                group = ops->device_group(dev);

        if (IS_ERR(group))
                return group;

        /*
         * Try to allocate a default domain - needs support from the
         * IOMMU driver.
         */
        if (!group->default_domain) {
                group->default_domain = __iommu_domain_alloc(dev->bus,
                                                             IOMMU_DOMAIN_DMA);
                if (!group->domain)
                        group->domain = group->default_domain;
        }

        ret = iommu_group_add_device(group, dev);
        if (ret) {
                iommu_group_put(group);
                return ERR_PTR(ret);
        }

        return group;
}

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
        return group->default_domain;
}

static int add_iommu_group(struct device *dev, void *data)
{
        struct iommu_callback_data *cb = data;
        const struct iommu_ops *ops = cb->ops;
        int ret;

        if (!ops->add_device)
                return 0;

        WARN_ON(dev->iommu_group);

        ret = ops->add_device(dev);

        /*
         * We ignore -ENODEV errors for now, as they just mean that the
         * device is not translated by an IOMMU.  We still care about
         * other errors and fail to initialize when they happen.
         */
        if (ret == -ENODEV)
                ret = 0;

        return ret;
}

static int remove_iommu_group(struct device *dev, void *data)
{
        struct iommu_callback_data *cb = data;
        const struct iommu_ops *ops = cb->ops;

        if (ops->remove_device && dev->iommu_group)
                ops->remove_device(dev);

        return 0;
}

static int iommu_bus_notifier(struct notifier_block *nb,
                              unsigned long action, void *data)
{
        struct device *dev = data;
        const struct iommu_ops *ops = dev->bus->iommu_ops;
        struct iommu_group *group;
        unsigned long group_action = 0;

        /*
         * ADD/DEL call into iommu driver ops if provided, which may
         * result in ADD/DEL notifiers to group->notifier
         */
        if (action == BUS_NOTIFY_ADD_DEVICE) {
                if (ops->add_device)
                        return ops->add_device(dev);
        } else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
                if (ops->remove_device && dev->iommu_group) {
                        ops->remove_device(dev);
                        return 0;
                }
        }

        /*
         * Remaining BUS_NOTIFYs get filtered and republished to the
         * group, if anyone is listening
         */
        group = iommu_group_get(dev);
        if (!group)
                return 0;

        switch (action) {
        case BUS_NOTIFY_BIND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
                break;
        case BUS_NOTIFY_BOUND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
                break;
        case BUS_NOTIFY_UNBIND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
                break;
        case BUS_NOTIFY_UNBOUND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
                break;
        }

        if (group_action)
                blocking_notifier_call_chain(&group->notifier,
                                             group_action, dev);

        iommu_group_put(group);
        return 0;
}

static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
{
        int err;
        struct notifier_block *nb;
        struct iommu_callback_data cb = {
                .ops = ops,
        };

        nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
        if (!nb)
                return -ENOMEM;

        nb->notifier_call = iommu_bus_notifier;

        err = bus_register_notifier(bus, nb);
        if (err)
                goto out_free;

        err = bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
        if (err)
                goto out_err;

        return 0;

out_err:
        /* Clean up */
        bus_for_each_dev(bus, NULL, &cb, remove_iommu_group);
        bus_unregister_notifier(bus, nb);

out_free:
        kfree(nb);

        return err;
}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus.
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus.  Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up.  With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
{
        int err;

        if (bus->iommu_ops != NULL)
                return -EBUSY;

        bus->iommu_ops = ops;

        /* Do IOMMU specific setup for this bus-type */
        err = iommu_bus_init(bus, ops);
        if (err)
                bus->iommu_ops = NULL;

        return err;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);

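/*
 * Example (illustrative sketch, not part of this file): an IOMMU driver
 * registering its ops for the platform bus from an initcall.  The
 * foo_iommu_ops structure is a made-up name standing in for a driver's
 * real struct iommu_ops.
 *
 *      static int __init foo_iommu_init(void)
 *      {
 *              if (iommu_present(&platform_bus_type))
 *                      return 0;
 *              return bus_set_iommu(&platform_bus_type, &foo_iommu_ops);
 *      }
 *
 * The iommu_present() check avoids the -EBUSY return when another driver
 * already claimed the bus.
 */
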
bool iommu_present(struct bus_type *bus)
{
        return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
        if (!bus->iommu_ops || !bus->iommu_ops->capable)
                return false;

        return bus->iommu_ops->capable(cap);
}
EXPORT_SYMBOL_GPL(iommu_capable);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
                             iommu_fault_handler_t handler,
                             void *token)
{
        BUG_ON(!domain);

        domain->handler = handler;
        domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);

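/*
 * Example (illustrative sketch, not part of this file): a minimal fault
 * handler matching iommu_fault_handler_t; a negative return tells the
 * core the fault was not handled.  The foo_fault name is made up.
 *
 *      static int foo_fault(struct iommu_domain *domain, struct device *dev,
 *                           unsigned long iova, int flags, void *token)
 *      {
 *              dev_err(dev, "iommu fault at iova 0x%lx\n", iova);
 *              return -ENOSYS;
 *      }
 *
 *      iommu_set_fault_handler(domain, foo_fault, NULL);
 */
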
static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
                                                 unsigned type)
{
        struct iommu_domain *domain;

        if (bus == NULL || bus->iommu_ops == NULL)
                return NULL;

        domain = bus->iommu_ops->domain_alloc(type);
        if (!domain)
                return NULL;

        domain->ops  = bus->iommu_ops;
        domain->type = type;
        /* Assume all sizes by default; the driver may override this later */
        domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;

        return domain;
}

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
        return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
        domain->ops->domain_free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

static int __iommu_attach_device(struct iommu_domain *domain,
                                 struct device *dev)
{
        int ret;

        if (unlikely(domain->ops->attach_dev == NULL))
                return -ENODEV;

        ret = domain->ops->attach_dev(domain, dev);
        if (!ret)
                trace_attach_device_to_domain(dev);
        return ret;
}

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
        struct iommu_group *group;
        int ret;

        group = iommu_group_get(dev);
        /* FIXME: Remove this when groups are mandatory for iommu drivers */
        if (group == NULL)
                return __iommu_attach_device(domain, dev);

        /*
         * We have a group - lock it to make sure the device-count doesn't
         * change while we are attaching
         */
        mutex_lock(&group->mutex);
        ret = -EINVAL;
        if (iommu_group_device_count(group) != 1)
                goto out_unlock;

        ret = __iommu_attach_group(domain, group);

out_unlock:
        mutex_unlock(&group->mutex);
        iommu_group_put(group);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

static void __iommu_detach_device(struct iommu_domain *domain,
                                  struct device *dev)
{
        if (unlikely(domain->ops->detach_dev == NULL))
                return;

        domain->ops->detach_dev(domain, dev);
        trace_detach_device_from_domain(dev);
}

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
        struct iommu_group *group;

        group = iommu_group_get(dev);
        /* FIXME: Remove this when groups are mandatory for iommu drivers */
        if (group == NULL)
                return __iommu_detach_device(domain, dev);

        mutex_lock(&group->mutex);
        if (iommu_group_device_count(group) != 1) {
                WARN_ON(1);
                goto out_unlock;
        }

        __iommu_detach_group(domain, group);

out_unlock:
        mutex_unlock(&group->mutex);
        iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
        struct iommu_domain *domain;
        struct iommu_group *group;

        group = iommu_group_get(dev);
        /* FIXME: Remove this when groups are mandatory for iommu drivers */
        if (group == NULL)
                return NULL;

        domain = group->domain;

        iommu_group_put(group);

        return domain;
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);

/*
 * IOMMU groups are really the natural working unit of the IOMMU, but
 * the IOMMU API works on domains and devices.  Bridge that gap by
 * iterating over the devices in a group.  Ideally we'd have a single
 * device which represents the requestor ID of the group, but we also
 * allow IOMMU drivers to create policy defined minimum sets, where
 * the physical hardware may be able to distinguish members, but we
 * wish to group them at a higher level (ex. untrusted multi-function
 * PCI devices).  Thus we attach each device.
 */
static int iommu_group_do_attach_device(struct device *dev, void *data)
{
        struct iommu_domain *domain = data;

        return __iommu_attach_device(domain, dev);
}

static int __iommu_attach_group(struct iommu_domain *domain,
                                struct iommu_group *group)
{
        int ret;

        if (group->default_domain && group->domain != group->default_domain)
                return -EBUSY;

        ret = __iommu_group_for_each_dev(group, domain,
                                         iommu_group_do_attach_device);
        if (ret == 0)
                group->domain = domain;

        return ret;
}

int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
        int ret;

        mutex_lock(&group->mutex);
        ret = __iommu_attach_group(domain, group);
        mutex_unlock(&group->mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_group);

static int iommu_group_do_detach_device(struct device *dev, void *data)
{
        struct iommu_domain *domain = data;

        __iommu_detach_device(domain, dev);

        return 0;
}

static void __iommu_detach_group(struct iommu_domain *domain,
                                 struct iommu_group *group)
{
        int ret;

        if (!group->default_domain) {
                __iommu_group_for_each_dev(group, domain,
                                           iommu_group_do_detach_device);
                group->domain = NULL;
                return;
        }

        if (group->domain == group->default_domain)
                return;

        /* Detach by re-attaching to the default domain */
        ret = __iommu_group_for_each_dev(group, group->default_domain,
                                         iommu_group_do_attach_device);
        if (ret != 0)
                WARN_ON(1);
        else
                group->domain = group->default_domain;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
        mutex_lock(&group->mutex);
        __iommu_detach_group(domain, group);
        mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
        if (unlikely(domain->ops->iova_to_phys == NULL))
                return 0;

        return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

static size_t iommu_pgsize(struct iommu_domain *domain,
                           unsigned long addr_merge, size_t size)
{
        unsigned int pgsize_idx;
        size_t pgsize;

        /* Max page size that still fits into 'size' */
        pgsize_idx = __fls(size);

        /* need to consider alignment requirements ? */
        if (likely(addr_merge)) {
                /* Max page size allowed by address */
                unsigned int align_pgsize_idx = __ffs(addr_merge);

                pgsize_idx = min(pgsize_idx, align_pgsize_idx);
        }

        /* build a mask of acceptable page sizes */
        pgsize = (1UL << (pgsize_idx + 1)) - 1;

        /* throw away page sizes not supported by the hardware */
        pgsize &= domain->pgsize_bitmap;

        /* make sure we're still sane */
        BUG_ON(!pgsize);

        /* pick the biggest page */
        pgsize_idx = __fls(pgsize);
        pgsize = 1UL << pgsize_idx;

        return pgsize;
}

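/*
 * Worked example (illustrative): with pgsize_bitmap = SZ_4K | SZ_2M,
 * addr_merge == 0x200000 and size == 0x210000, __ffs(addr_merge) and
 * __fls(size) both allow up to bit 21, so the mask covers 4K and 2M,
 * and the first call returns 2M.  The remaining 0x10000 at the new,
 * 2M-aligned address only fits 4K pages, so subsequent calls return
 * SZ_4K until the range is consumed.
 */
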
int iommu_map(struct iommu_domain *domain, unsigned long iova,
              phys_addr_t paddr, size_t size, int prot)
{
        unsigned long orig_iova = iova;
        unsigned int min_pagesz;
        size_t orig_size = size;
        phys_addr_t orig_paddr = paddr;
        int ret = 0;

        if (unlikely(domain->ops->map == NULL ||
                     domain->pgsize_bitmap == 0UL))
                return -ENODEV;

        if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
                return -EINVAL;

        /* find out the minimum page size supported */
        min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

        /*
         * both the virtual address and the physical one, as well as
         * the size of the mapping, must be aligned (at least) to the
         * size of the smallest page supported by the hardware
         */
        if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
                pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
                       iova, &paddr, size, min_pagesz);
                return -EINVAL;
        }

        pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);

        while (size) {
                size_t pgsize = iommu_pgsize(domain, iova | paddr, size);

                pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
                         iova, &paddr, pgsize);

                ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
                if (ret)
                        break;

                iova += pgsize;
                paddr += pgsize;
                size -= pgsize;
        }

        /* unroll mapping in case something went wrong */
        if (ret)
                iommu_unmap(domain, orig_iova, orig_size - size);
        else
                trace_map(orig_iova, orig_paddr, orig_size);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);

size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
        size_t unmapped_page, unmapped = 0;
        unsigned int min_pagesz;
        unsigned long orig_iova = iova;

        if (unlikely(domain->ops->unmap == NULL ||
                     domain->pgsize_bitmap == 0UL))
                return -ENODEV;

        if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
                return -EINVAL;

        /* find out the minimum page size supported */
        min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

        /*
         * The virtual address, as well as the size of the mapping, must be
         * aligned (at least) to the size of the smallest page supported
         * by the hardware
         */
        if (!IS_ALIGNED(iova | size, min_pagesz)) {
                pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
                       iova, size, min_pagesz);
                return -EINVAL;
        }

        pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);

        /*
         * Keep iterating until we either unmap 'size' bytes (or more)
         * or we hit an area that isn't mapped.
         */
        while (unmapped < size) {
                size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);

                unmapped_page = domain->ops->unmap(domain, iova, pgsize);
                if (!unmapped_page)
                        break;

                pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
                         iova, unmapped_page);

                iova += unmapped_page;
                unmapped += unmapped_page;
        }

        trace_unmap(orig_iova, size, unmapped);
        return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);

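/*
 * Example (illustrative sketch, not part of this file): typical
 * unmanaged-domain usage of the map/unmap API above from a device
 * driver.  The bus type, iova and page are placeholders, and error
 * handling is abbreviated.
 *
 *      struct iommu_domain *domain = iommu_domain_alloc(&pci_bus_type);
 *
 *      if (!domain)
 *              return -ENODEV;
 *      if (iommu_attach_device(domain, dev)) {
 *              iommu_domain_free(domain);
 *              return -ENODEV;
 *      }
 *
 *      iommu_map(domain, 0x100000, page_to_phys(page), SZ_4K,
 *                IOMMU_READ | IOMMU_WRITE);
 *
 *      iommu_unmap(domain, 0x100000, SZ_4K);
 *      iommu_detach_device(domain, dev);
 *      iommu_domain_free(domain);
 */
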
size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
                            struct scatterlist *sg, unsigned int nents, int prot)
{
        struct scatterlist *s;
        size_t mapped = 0;
        unsigned int i, min_pagesz;
        int ret;

        if (unlikely(domain->pgsize_bitmap == 0UL))
                return 0;

        min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

        for_each_sg(sg, s, nents, i) {
                phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;

                /*
                 * We are mapping on IOMMU page boundaries, so offset within
                 * the page must be 0.  However, the IOMMU may support pages
                 * smaller than PAGE_SIZE, so s->offset may still represent
                 * an offset of that boundary within the CPU page.
                 */
                if (!IS_ALIGNED(s->offset, min_pagesz))
                        goto out_err;

                ret = iommu_map(domain, iova + mapped, phys, s->length, prot);
                if (ret)
                        goto out_err;

                mapped += s->length;
        }

        return mapped;

out_err:
        /* undo mappings already done */
        iommu_unmap(domain, iova, mapped);

        return 0;
}
EXPORT_SYMBOL_GPL(default_iommu_map_sg);

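/*
 * Example (illustrative sketch, not part of this file): mapping a
 * scatterlist via iommu_map_sg(), which dispatches to the domain's
 * map_sg op (commonly wired to the default helper above).  sgt is
 * assumed to be an already-populated struct sg_table.
 *
 *      size_t mapped;
 *
 *      mapped = iommu_map_sg(domain, iova, sgt->sgl, sgt->nents,
 *                            IOMMU_READ | IOMMU_WRITE);
 *      if (!mapped)
 *              return -ENOMEM;
 */
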
int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
                               phys_addr_t paddr, u64 size, int prot)
{
        if (unlikely(domain->ops->domain_window_enable == NULL))
                return -ENODEV;

        return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
                                                 prot);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_enable);

void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
        if (unlikely(domain->ops->domain_window_disable == NULL))
                return;

        return domain->ops->domain_window_disable(domain, wnd_nr);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_disable);

static int __init iommu_init(void)
{
        iommu_group_kset = kset_create_and_add("iommu_groups",
                                               NULL, kernel_kobj);
        BUG_ON(!iommu_group_kset);

        return 0;
}
core_initcall(iommu_init);

int iommu_domain_get_attr(struct iommu_domain *domain,
                          enum iommu_attr attr, void *data)
{
        struct iommu_domain_geometry *geometry;
        bool *paging;
        int ret = 0;
        u32 *count;

        switch (attr) {
        case DOMAIN_ATTR_GEOMETRY:
                geometry  = data;
                *geometry = domain->geometry;

                break;
        case DOMAIN_ATTR_PAGING:
                paging  = data;
                *paging = (domain->pgsize_bitmap != 0UL);
                break;
        case DOMAIN_ATTR_WINDOWS:
                count = data;

                if (domain->ops->domain_get_windows != NULL)
                        *count = domain->ops->domain_get_windows(domain);
                else
                        ret = -ENODEV;

                break;
        default:
                if (!domain->ops->domain_get_attr)
                        return -EINVAL;

                ret = domain->ops->domain_get_attr(domain, attr, data);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_get_attr);

int iommu_domain_set_attr(struct iommu_domain *domain,
                          enum iommu_attr attr, void *data)
{
        int ret = 0;
        u32 *count;

        switch (attr) {
        case DOMAIN_ATTR_WINDOWS:
                count = data;

                if (domain->ops->domain_set_windows != NULL)
                        ret = domain->ops->domain_set_windows(domain, *count);
                else
                        ret = -ENODEV;

                break;
        default:
                if (domain->ops->domain_set_attr == NULL)
                        return -EINVAL;

                ret = domain->ops->domain_set_attr(domain, attr, data);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);

void iommu_get_dm_regions(struct device *dev, struct list_head *list)
{
        const struct iommu_ops *ops = dev->bus->iommu_ops;

        if (ops && ops->get_dm_regions)
                ops->get_dm_regions(dev, list);
}

void iommu_put_dm_regions(struct device *dev, struct list_head *list)
{
        const struct iommu_ops *ops = dev->bus->iommu_ops;

        if (ops && ops->put_dm_regions)
                ops->put_dm_regions(dev, list);
}

/* Request that a device is direct mapped by the IOMMU */
int iommu_request_dm_for_dev(struct device *dev)
{
        struct iommu_domain *dm_domain;
        struct iommu_group *group;
        int ret;

        /* Device must already be in a group before calling this function */
        group = iommu_group_get_for_dev(dev);
        if (IS_ERR(group))
                return PTR_ERR(group);

        mutex_lock(&group->mutex);

        /* Check if the default domain is already direct mapped */
        ret = 0;
        if (group->default_domain &&
            group->default_domain->type == IOMMU_DOMAIN_IDENTITY)
                goto out;

        /* Don't change mappings of existing devices */
        ret = -EBUSY;
        if (iommu_group_device_count(group) != 1)
                goto out;

        /* Allocate a direct mapped domain */
        ret = -ENOMEM;
        dm_domain = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_IDENTITY);
        if (!dm_domain)
                goto out;

        /* Attach the device to the domain */
        ret = __iommu_attach_group(dm_domain, group);
        if (ret) {
                iommu_domain_free(dm_domain);
                goto out;
        }

        /* Make the direct mapped domain the default for this group */
        if (group->default_domain)
                iommu_domain_free(group->default_domain);
        group->default_domain = dm_domain;

        pr_info("Using direct mapping for device %s\n", dev_name(dev));

        ret = 0;
out:
        mutex_unlock(&group->mutex);
        iommu_group_put(group);

        return ret;
}

struct iommu_instance {
        struct list_head list;
        struct fwnode_handle *fwnode;
        const struct iommu_ops *ops;
};
static LIST_HEAD(iommu_instance_list);
static DEFINE_SPINLOCK(iommu_instance_lock);

void iommu_register_instance(struct fwnode_handle *fwnode,
                             const struct iommu_ops *ops)
{
        struct iommu_instance *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);

        if (WARN_ON(!iommu))
                return;

        of_node_get(to_of_node(fwnode));
        INIT_LIST_HEAD(&iommu->list);
        iommu->fwnode = fwnode;
        iommu->ops = ops;
        spin_lock(&iommu_instance_lock);
        list_add_tail(&iommu->list, &iommu_instance_list);
        spin_unlock(&iommu_instance_lock);
}

const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
        struct iommu_instance *instance;
        const struct iommu_ops *ops = NULL;

        spin_lock(&iommu_instance_lock);
        list_for_each_entry(instance, &iommu_instance_list, list)
                if (instance->fwnode == fwnode) {
                        ops = instance->ops;
                        break;
                }
        spin_unlock(&iommu_instance_lock);
        return ops;
}

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
                      const struct iommu_ops *ops)
{
        struct iommu_fwspec *fwspec = dev->iommu_fwspec;

        if (fwspec)
                return ops == fwspec->ops ? 0 : -EINVAL;

        fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL);
        if (!fwspec)
                return -ENOMEM;

        of_node_get(to_of_node(iommu_fwnode));
        fwspec->iommu_fwnode = iommu_fwnode;
        fwspec->ops = ops;
        dev->iommu_fwspec = fwspec;
        return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_init);

void iommu_fwspec_free(struct device *dev)
{
        struct iommu_fwspec *fwspec = dev->iommu_fwspec;

        if (fwspec) {
                fwnode_handle_put(fwspec->iommu_fwnode);
                kfree(fwspec);
                dev->iommu_fwspec = NULL;
        }
}
EXPORT_SYMBOL_GPL(iommu_fwspec_free);

int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
{
        struct iommu_fwspec *fwspec = dev->iommu_fwspec;
        size_t size;
        int i;

        if (!fwspec)
                return -EINVAL;

        size = offsetof(struct iommu_fwspec, ids[fwspec->num_ids + num_ids]);
        if (size > sizeof(*fwspec)) {
                fwspec = krealloc(dev->iommu_fwspec, size, GFP_KERNEL);
                if (!fwspec)
                        return -ENOMEM;
        }

        for (i = 0; i < num_ids; i++)
                fwspec->ids[fwspec->num_ids + i] = ids[i];

        fwspec->num_ids += num_ids;
        dev->iommu_fwspec = fwspec;
        return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);

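/*
 * Example (illustrative sketch, not part of this file): firmware glue
 * code (e.g. an of_xlate-style callback) building a fwspec for a master
 * device with the helpers above.  The single stream ID value is made up.
 *
 *      u32 sid = 0x42;
 *      int ret;
 *
 *      ret = iommu_fwspec_init(dev, iommu_fwnode, ops);
 *      if (!ret)
 *              ret = iommu_fwspec_add_ids(dev, &sid, 1);
 *      if (ret)
 *              iommu_fwspec_free(dev);
 */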