/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#define pr_fmt(fmt)    "iommu: " fmt

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/property.h>
#include <trace/events/iommu.h>

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);

struct iommu_callback_data {
	const struct iommu_ops *ops;
};

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct mutex mutex;
	struct blocking_notifier_head notifier;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
	struct iommu_domain *default_domain;
	struct iommu_domain *domain;
};

struct iommu_device {
	struct list_head list;
	struct device *dev;
	char *name;
};

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =	\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group);
static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group);

static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sprintf(buf, "%s\n", group->name);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	pr_debug("Releasing group %d\n", group->id);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	ida_simple_remove(&iommu_group_ida, group->id);

	if (group->default_domain)
		iommu_domain_free(group->default_domain);

	kfree(group->name);
	kfree(group);
}

static struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group. The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added. Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(group);
		return ERR_PTR(ret);
	}
	group->id = ret;

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		ida_simple_remove(&iommu_group_ida, group->id);
		kfree(group);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group. We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	pr_debug("Allocated group %d\n", group->id);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);
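/*
 * Editorial example (not part of the original file, compiled out behind an
 * example-only guard): a minimal sketch of how an iommu driver might use the
 * group allocation API above. The function name and the group name string
 * are hypothetical; only the iommu_group_* calls are real.
 */
#ifdef IOMMU_API_USAGE_EXAMPLES
static int example_driver_setup_group(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	/* Allocate a fresh group; the caller holds the initial reference. */
	group = iommu_group_alloc();
	if (IS_ERR(group))
		return PTR_ERR(group);

	/* Optionally name the group; the name shows up in sysfs. */
	ret = iommu_group_set_name(group, "example-group");
	if (ret)
		goto out_put;

	/* Add the device; this takes its own reference on the group. */
	ret = iommu_group_add_device(group, dev);

out_put:
	/* Drop the allocation reference; the group lives while it has devices. */
	iommu_group_put(group);
	return ret;
}
#endif /* IOMMU_API_USAGE_EXAMPLES */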
struct iommu_group *iommu_group_get_by_id(int id)
{
	struct kobject *group_kobj;
	struct iommu_group *group;
	const char *name;

	if (!iommu_group_kset)
		return NULL;

	name = kasprintf(GFP_KERNEL, "%d", id);
	if (!name)
		return NULL;

	group_kobj = kset_find_obj(iommu_group_kset, name);
	kfree(name);

	if (!group_kobj)
		return NULL;

	group = container_of(group_kobj, struct iommu_group, kobj);
	BUG_ON(group->id != id);

	kobject_get(group->devices_kobj);
	kobject_put(&group->kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations. This function provides a way to retrieve it. Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations. This function provides a way to set the data after
 * the group has been allocated. Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);

/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group. When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

static int iommu_group_create_direct_mappings(struct iommu_group *group,
					      struct device *dev)
{
	struct iommu_domain *domain = group->default_domain;
	struct iommu_dm_region *entry;
	struct list_head mappings;
	unsigned long pg_size;
	int ret = 0;

	if (!domain || domain->type != IOMMU_DOMAIN_DMA)
		return 0;

	BUG_ON(!domain->pgsize_bitmap);

	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
	INIT_LIST_HEAD(&mappings);

	iommu_get_dm_regions(dev, &mappings);

	/* We need to consider overlapping regions for different devices */
	list_for_each_entry(entry, &mappings, list) {
		dma_addr_t start, end, addr;

		if (domain->ops->apply_dm_region)
			domain->ops->apply_dm_region(dev, domain, entry);

		start = ALIGN(entry->start, pg_size);
		end = ALIGN(entry->start + entry->length, pg_size);

		for (addr = start; addr < end; addr += pg_size) {
			phys_addr_t phys_addr;

			phys_addr = iommu_iova_to_phys(domain, addr);
			if (phys_addr)
				continue;

			ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
			if (ret)
				goto out;
		}
	}

out:
	iommu_put_dm_regions(dev, &mappings);

	return ret;
}

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group. Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	int ret, i = 0;
	struct iommu_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret) {
		kfree(device);
		return ret;
	}

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		sysfs_remove_link(&dev->kobj, "iommu_group");
		kfree(device);
		return -ENOMEM;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		kfree(device->name);
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and append an instance to the name.
			 */
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}

		sysfs_remove_link(&dev->kobj, "iommu_group");
		kfree(device);
		return ret;
	}

	kobject_get(group->devices_kobj);

	dev->iommu_group = group;

	iommu_group_create_direct_mappings(group, dev);

	mutex_lock(&group->mutex);
	list_add_tail(&device->list, &group->devices);
	if (group->domain)
		__iommu_attach_device(group->domain, dev);
	mutex_unlock(&group->mutex);

	/* Notify any listeners about change to group. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);

	trace_add_device_to_group(group->id, dev);

	pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group. This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct iommu_device *tmp_device, *device = NULL;

	pr_info("Removing device %s from group %d\n", dev_name(dev), group->id);

	/* Pre-notify listeners that a device is being removed. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);

	mutex_lock(&group->mutex);
	list_for_each_entry(tmp_device, &group->devices, list) {
		if (tmp_device->dev == dev) {
			device = tmp_device;
			list_del(&device->list);
			break;
		}
	}
	mutex_unlock(&group->mutex);

	if (!device)
		return;

	sysfs_remove_link(group->devices_kobj, device->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	kfree(device->name);
	kfree(device);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

static int iommu_group_device_count(struct iommu_group *group)
{
	struct iommu_device *entry;
	int ret = 0;

	list_for_each_entry(entry, &group->devices, list)
		ret++;

	return ret;
}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
				      int (*fn)(struct device *, void *))
{
	struct iommu_device *device;
	int ret = 0;

	list_for_each_entry(device, &group->devices, list) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	return ret;
}

int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_group_for_each_dev(group, data, fn);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
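/*
 * Editorial example (not part of the original file, compiled out): using
 * iommu_group_for_each_dev() with a caller-supplied callback. The callback
 * name and the printed message are hypothetical.
 */
#ifdef IOMMU_API_USAGE_EXAMPLES
static int example_print_group_member(struct device *dev, void *data)
{
	int *count = data;

	dev_info(dev, "member %d of its iommu group\n", (*count)++);

	return 0; /* a non-zero return would stop the iteration */
}

static void example_walk_group(struct iommu_group *group)
{
	int count = 0;

	/* group->mutex is held around the callbacks, so keep them short. */
	iommu_group_for_each_dev(group, &count, example_print_group_member);
}
#endif /* IOMMU_API_USAGE_EXAMPLES */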
/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device. If found, the group is returned and the group
 * reference is incremented, else NULL.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group. Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
	kobject_get(group->devices_kobj);
	return group;
}

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group. Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_group_register_notifier - Register a notifier for group changes
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * This function allows iommu group users to track changes in a group.
 * See include/linux/iommu.h for actions sent via this notifier. Caller
 * should hold a reference to the group throughout notifier registration.
 */
int iommu_group_register_notifier(struct iommu_group *group,
				  struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);

/**
 * iommu_group_unregister_notifier - Unregister a notifier
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * Unregister a previously registered group notifier block.
 */
int iommu_group_unregister_notifier(struct iommu_group *group,
				    struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
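/*
 * Editorial example (not part of the original file, compiled out): tracking
 * group membership changes with the notifier interface above. The handler and
 * its message are hypothetical; the action values are the
 * IOMMU_GROUP_NOTIFY_* constants from include/linux/iommu.h.
 */
#ifdef IOMMU_API_USAGE_EXAMPLES
static int example_group_notifier(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == IOMMU_GROUP_NOTIFY_ADD_DEVICE)
		dev_info(dev, "added to an iommu group\n");

	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_group_notifier,
};

static void example_watch_group(struct iommu_group *group)
{
	/* The caller should hold a group reference across registration. */
	iommu_group_register_notifier(group, &example_nb);
}
#endif /* IOMMU_API_USAGE_EXAMPLES */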
/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding. This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as they pass through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups. For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
							unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return NULL;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = get_pci_alias_group(tmp, devfns);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups. DMA
 * aliases are only supported on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at PCIe
 * devices, and therefore only expect multiple slots on the root complex or
 * downstream switch ports). It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop. To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
		return NULL;

	group = iommu_group_get(&pdev->dev);
	if (group)
		return group;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus)
			continue;

		/* We alias them or they alias us */
		if (pci_devs_are_dma_aliases(pdev, tmp)) {
			group = get_pci_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}

			group = get_pci_function_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}
		}
	}

	return NULL;
}

struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device. Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_alloc();
	if (IS_ERR(group))
		return NULL;

	return group;
}

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	u64 devfns[4] = { 0 };

	if (WARN_ON(!dev_is_pci(dev)))
		return ERR_PTR(-EINVAL);

	/*
	 * Find the upstream DMA alias for the device. A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS. Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Look for existing groups on device aliases. If we alias another
	 * device or another device aliases us, use the same group.
	 */
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any. No need to clear
	 * the search bitmap, the tested devfns are still valid.
	 */
	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/* No shared group found, allocate new */
	group = iommu_group_alloc();
	if (IS_ERR(group))
		return NULL;

	return group;
}
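/*
 * Editorial example (not part of the original file, compiled out): how an
 * IOMMU driver's ops->device_group callback might dispatch to the helpers
 * above, using pci_device_group() for PCI devices and one group per device
 * otherwise. The function name is hypothetical.
 */
#ifdef IOMMU_API_USAGE_EXAMPLES
static struct iommu_group *example_device_group(struct device *dev)
{
	if (dev_is_pci(dev))
		return pci_device_group(dev);

	return generic_device_group(dev);
}
#endif /* IOMMU_API_USAGE_EXAMPLES */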
/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device. On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device. The reference should be released with iommu_group_put().
 */
struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (group)
		return group;

	group = ERR_PTR(-EINVAL);

	if (ops && ops->device_group)
		group = ops->device_group(dev);

	if (IS_ERR(group))
		return group;

	/*
	 * Try to allocate a default domain - needs support from the
	 * IOMMU driver.
	 */
	if (!group->default_domain) {
		group->default_domain = __iommu_domain_alloc(dev->bus,
							     IOMMU_DOMAIN_DMA);
		if (!group->domain)
			group->domain = group->default_domain;
	}

	ret = iommu_group_add_device(group, dev);
	if (ret) {
		iommu_group_put(group);
		return ERR_PTR(ret);
	}

	return group;
}

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
	return group->default_domain;
}

static int add_iommu_group(struct device *dev, void *data)
{
	struct iommu_callback_data *cb = data;
	const struct iommu_ops *ops = cb->ops;
	int ret;

	if (!ops->add_device)
		return 0;

	WARN_ON(dev->iommu_group);

	ret = ops->add_device(dev);

	/*
	 * We ignore -ENODEV errors for now, as they just mean that the
	 * device is not translated by an IOMMU. We still care about
	 * other errors and fail to initialize when they happen.
	 */
	if (ret == -ENODEV)
		ret = 0;

	return ret;
}

static int remove_iommu_group(struct device *dev, void *data)
{
	struct iommu_callback_data *cb = data;
	const struct iommu_ops *ops = cb->ops;

	if (ops->remove_device && dev->iommu_group)
		ops->remove_device(dev);

	return 0;
}

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	unsigned long group_action = 0;

	/*
	 * ADD/DEL call into iommu driver ops if provided, which may
	 * result in ADD/DEL notifiers to group->notifier
	 */
	if (action == BUS_NOTIFY_ADD_DEVICE) {
		if (ops->add_device)
			return ops->add_device(dev);
	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
		if (ops->remove_device && dev->iommu_group) {
			ops->remove_device(dev);
			return 0;
		}
	}

	/*
	 * Remaining BUS_NOTIFYs get filtered and republished to the
	 * group, if anyone is listening
	 */
	group = iommu_group_get(dev);
	if (!group)
		return 0;

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
		break;
	case BUS_NOTIFY_BOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
		break;
	case BUS_NOTIFY_UNBIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
		break;
	case BUS_NOTIFY_UNBOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
		break;
	}

	if (group_action)
		blocking_notifier_call_chain(&group->notifier,
					     group_action, dev);

	iommu_group_put(group);
	return 0;
}

static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
{
	int err;
	struct notifier_block *nb;
	struct iommu_callback_data cb = {
		.ops = ops,
	};

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	nb->notifier_call = iommu_bus_notifier;

	err = bus_register_notifier(bus, nb);
	if (err)
		goto out_free;

	err = bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
	if (err)
		goto out_err;

	return 0;

out_err:
	/* Clean up */
	bus_for_each_dev(bus, NULL, &cb, remove_iommu_group);
	bus_unregister_notifier(bus, nb);

out_free:
	kfree(nb);

	return err;
}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus.
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus. Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up. With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
{
	int err;

	if (bus->iommu_ops != NULL)
		return -EBUSY;

	bus->iommu_ops = ops;

	/* Do IOMMU specific setup for this bus-type */
	err = iommu_bus_init(bus, ops);
	if (err)
		bus->iommu_ops = NULL;

	return err;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);
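/*
 * Editorial example (not part of the original file, compiled out): a minimal
 * sketch of an IOMMU driver registering its ops for the PCI bus. The ops
 * structure is hypothetical and deliberately incomplete; a real driver also
 * fills in domain_alloc/domain_free, attach_dev, map/unmap and friends, and
 * the pgsize_bitmap value here is an assumption (4K pages only).
 */
#ifdef IOMMU_API_USAGE_EXAMPLES
static const struct iommu_ops example_iommu_ops = {
	.device_group	= generic_device_group,
	.pgsize_bitmap	= 1UL << 12,	/* assumed: 4K pages only */
};

static int __init example_iommu_driver_init(void)
{
	/* Returns -EBUSY if another driver already claimed the bus. */
	return bus_set_iommu(&pci_bus_type, &example_iommu_ops);
}
#endif /* IOMMU_API_USAGE_EXAMPLES */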
bool iommu_present(struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
	if (!bus->iommu_ops || !bus->iommu_ops->capable)
		return false;

	return bus->iommu_ops->capable(cap);
}
EXPORT_SYMBOL_GPL(iommu_capable);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
			     iommu_fault_handler_t handler,
			     void *token)
{
	BUG_ON(!domain);

	domain->handler = handler;
	domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
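/*
 * Editorial example (not part of the original file, compiled out): installing
 * a fault handler on a domain. The handler follows the iommu_fault_handler_t
 * signature and, per the kernel-doc above, returns 0 when the fault was
 * handled and an error code otherwise. Names and the message are
 * hypothetical.
 */
#ifdef IOMMU_API_USAGE_EXAMPLES
static int example_fault_handler(struct iommu_domain *domain,
				 struct device *dev, unsigned long iova,
				 int flags, void *token)
{
	dev_err(dev, "iommu fault at iova 0x%lx (flags 0x%x)\n", iova, flags);

	return -ENOSYS;	/* not handled here */
}

static void example_install_fault_handler(struct iommu_domain *domain,
					  void *driver_data)
{
	iommu_set_fault_handler(domain, example_fault_handler, driver_data);
}
#endif /* IOMMU_API_USAGE_EXAMPLES */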
static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type)
{
	struct iommu_domain *domain;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;

	domain = bus->iommu_ops->domain_alloc(type);
	if (!domain)
		return NULL;

	domain->ops  = bus->iommu_ops;
	domain->type = type;
	/* Assume all sizes by default; the driver may override this later */
	domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;

	return domain;
}

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
	domain->ops->domain_free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev)
{
	int ret;

	if (unlikely(domain->ops->attach_dev == NULL))
		return -ENODEV;

	ret = domain->ops->attach_dev(domain, dev);
	if (!ret)
		trace_attach_device_to_domain(dev);
	return ret;
}

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	/* FIXME: Remove this when groups are mandatory for iommu drivers */
	if (group == NULL)
		return __iommu_attach_device(domain, dev);

	/*
	 * We have a group - lock it to make sure the device-count doesn't
	 * change while we are attaching
	 */
	mutex_lock(&group->mutex);
	ret = -EINVAL;
	if (iommu_group_device_count(group) != 1)
		goto out_unlock;

	ret = __iommu_attach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

static void __iommu_detach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	if (unlikely(domain->ops->detach_dev == NULL))
		return;

	domain->ops->detach_dev(domain, dev);
	trace_detach_device_from_domain(dev);
}

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	/* FIXME: Remove this when groups are mandatory for iommu drivers */
	if (group == NULL)
		return __iommu_detach_device(domain, dev);

	mutex_lock(&group->mutex);
	if (iommu_group_device_count(group) != 1) {
		WARN_ON(1);
		goto out_unlock;
	}

	__iommu_detach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	struct iommu_domain *domain;
	struct iommu_group *group;

	group = iommu_group_get(dev);
	/* FIXME: Remove this when groups are mandatory for iommu drivers */
	if (group == NULL)
		return NULL;

	domain = group->domain;

	iommu_group_put(group);

	return domain;
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);

/*
 * IOMMU groups are really the natural working unit of the IOMMU, but
 * the IOMMU API works on domains and devices. Bridge that gap by
 * iterating over the devices in a group. Ideally we'd have a single
 * device which represents the requestor ID of the group, but we also
 * allow IOMMU drivers to create policy defined minimum sets, where
 * the physical hardware may be able to distinguish members, but we
 * wish to group them at a higher level (ex. untrusted multi-function
 * PCI devices). Thus we attach each device.
 */
static int iommu_group_do_attach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	return __iommu_attach_device(domain, dev);
}

static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group)
{
	int ret;

	if (group->default_domain && group->domain != group->default_domain)
		return -EBUSY;

	ret = __iommu_group_for_each_dev(group, domain,
					 iommu_group_do_attach_device);
	if (ret == 0)
		group->domain = domain;

	return ret;
}

int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_attach_group(domain, group);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_group);

static int iommu_group_do_detach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	__iommu_detach_device(domain, dev);

	return 0;
}

static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group)
{
	int ret;

	if (!group->default_domain) {
		__iommu_group_for_each_dev(group, domain,
					   iommu_group_do_detach_device);
		group->domain = NULL;
		return;
	}

	if (group->domain == group->default_domain)
		return;

	/* Detach by re-attaching to the default domain */
	ret = __iommu_group_for_each_dev(group, group->default_domain,
					 iommu_group_do_attach_device);
	if (ret != 0)
		WARN_ON(1);
	else
		group->domain = group->default_domain;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	mutex_lock(&group->mutex);
	__iommu_detach_group(domain, group);
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	if (unlikely(domain->ops->iova_to_phys == NULL))
		return 0;

	return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

static size_t iommu_pgsize(struct iommu_domain *domain,
			   unsigned long addr_merge, size_t size)
{
	unsigned int pgsize_idx;
	size_t pgsize;

	/* Max page size that still fits into 'size' */
	pgsize_idx = __fls(size);

	/* need to consider alignment requirements ? */
	if (likely(addr_merge)) {
		/* Max page size allowed by address */
		unsigned int align_pgsize_idx = __ffs(addr_merge);

		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
	}

	/* build a mask of acceptable page sizes */
	pgsize = (1UL << (pgsize_idx + 1)) - 1;

	/* throw away page sizes not supported by the hardware */
	pgsize &= domain->pgsize_bitmap;

	/* make sure we're still sane */
	BUG_ON(!pgsize);

	/* pick the biggest page */
	pgsize_idx = __fls(pgsize);
	pgsize = 1UL << pgsize_idx;

	return pgsize;
}

int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	phys_addr_t orig_paddr = paddr;
	int ret = 0;

	if (unlikely(domain->ops->map == NULL ||
		     domain->pgsize_bitmap == 0UL))
		return -ENODEV;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return -EINVAL;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
		       iova, &paddr, size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);

	while (size) {
		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);

		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
			 iova, &paddr, pgsize);

		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);
	else
		trace_map(orig_iova, orig_paddr, orig_size);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);

size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	unsigned int min_pagesz;
	unsigned long orig_iova = iova;

	if (unlikely(domain->ops->unmap == NULL ||
		     domain->pgsize_bitmap == 0UL))
		return -ENODEV;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return -EINVAL;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
		       iova, size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);

		unmapped_page = domain->ops->unmap(domain, iova, pgsize);
		if (!unmapped_page)
			break;

		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
			 iova, unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	trace_unmap(orig_iova, size, unmapped);
	return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);
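/*
 * Editorial example (not part of the original file, compiled out): the
 * typical unmanaged-domain life cycle used by callers such as VFIO or KVM -
 * allocate a domain for the bus, attach a device, map and unmap a region,
 * then detach and free. The addresses, sizes and function name are
 * hypothetical; all of them are page aligned by assumption.
 */
#ifdef IOMMU_API_USAGE_EXAMPLES
static int example_domain_round_trip(struct device *dev, phys_addr_t paddr)
{
	const unsigned long iova = 0x100000;	/* arbitrary, page aligned */
	const size_t size = 0x10000;		/* 64K, page aligned */
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(dev->bus);
	if (!domain)
		return -ENODEV;

	ret = iommu_attach_device(domain, dev);
	if (ret)
		goto out_free;

	ret = iommu_map(domain, iova, paddr, size, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	/* ... DMA through the mapping; iommu_iova_to_phys() can verify it ... */

	iommu_unmap(domain, iova, size);
out_detach:
	iommu_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}
#endif /* IOMMU_API_USAGE_EXAMPLES */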
size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			    struct scatterlist *sg, unsigned int nents, int prot)
{
	struct scatterlist *s;
	size_t mapped = 0;
	unsigned int i, min_pagesz;
	int ret;

	if (unlikely(domain->pgsize_bitmap == 0UL))
		return 0;

	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	for_each_sg(sg, s, nents, i) {
		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;

		/*
		 * We are mapping on IOMMU page boundaries, so offset within
		 * the page must be 0. However, the IOMMU may support pages
		 * smaller than PAGE_SIZE, so s->offset may still represent
		 * an offset of that boundary within the CPU page.
		 */
		if (!IS_ALIGNED(s->offset, min_pagesz))
			goto out_err;

		ret = iommu_map(domain, iova + mapped, phys, s->length, prot);
		if (ret)
			goto out_err;

		mapped += s->length;
	}

	return mapped;

out_err:
	/* undo mappings already done */
	iommu_unmap(domain, iova, mapped);

	return 0;
}
EXPORT_SYMBOL_GPL(default_iommu_map_sg);

int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
			       phys_addr_t paddr, u64 size, int prot)
{
	if (unlikely(domain->ops->domain_window_enable == NULL))
		return -ENODEV;

	return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
						 prot);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_enable);

void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
	if (unlikely(domain->ops->domain_window_disable == NULL))
		return;

	return domain->ops->domain_window_disable(domain, wnd_nr);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_disable);

static int __init iommu_init(void)
{
	iommu_group_kset = kset_create_and_add("iommu_groups",
					       NULL, kernel_kobj);
	BUG_ON(!iommu_group_kset);

	return 0;
}
core_initcall(iommu_init);

int iommu_domain_get_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	struct iommu_domain_geometry *geometry;
	bool *paging;
	int ret = 0;
	u32 *count;

	switch (attr) {
	case DOMAIN_ATTR_GEOMETRY:
		geometry  = data;
		*geometry = domain->geometry;

		break;
	case DOMAIN_ATTR_PAGING:
		paging  = data;
		*paging = (domain->pgsize_bitmap != 0UL);
		break;
	case DOMAIN_ATTR_WINDOWS:
		count = data;

		if (domain->ops->domain_get_windows != NULL)
			*count = domain->ops->domain_get_windows(domain);
		else
			ret = -ENODEV;

		break;
	default:
		if (!domain->ops->domain_get_attr)
			return -EINVAL;

		ret = domain->ops->domain_get_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_get_attr);
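/*
 * Editorial example (not part of the original file, compiled out): querying a
 * domain's aperture through the attribute interface above. Only
 * DOMAIN_ATTR_GEOMETRY is used; the function name is hypothetical.
 */
#ifdef IOMMU_API_USAGE_EXAMPLES
static void example_print_geometry(struct iommu_domain *domain)
{
	struct iommu_domain_geometry geo;

	if (iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo))
		return;

	if (geo.force_aperture)
		pr_info("domain aperture: %pad - %pad\n",
			&geo.aperture_start, &geo.aperture_end);
}
#endif /* IOMMU_API_USAGE_EXAMPLES */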
int iommu_domain_set_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	int ret = 0;
	u32 *count;

	switch (attr) {
	case DOMAIN_ATTR_WINDOWS:
		count = data;

		if (domain->ops->domain_set_windows != NULL)
			ret = domain->ops->domain_set_windows(domain, *count);
		else
			ret = -ENODEV;

		break;
	default:
		if (domain->ops->domain_set_attr == NULL)
			return -EINVAL;

		ret = domain->ops->domain_set_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);

void iommu_get_dm_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->get_dm_regions)
		ops->get_dm_regions(dev, list);
}

void iommu_put_dm_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->put_dm_regions)
		ops->put_dm_regions(dev, list);
}

/* Request that a device is direct mapped by the IOMMU */
int iommu_request_dm_for_dev(struct device *dev)
{
	struct iommu_domain *dm_domain;
	struct iommu_group *group;
	int ret;

	/* Device must already be in a group before calling this function */
	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	mutex_lock(&group->mutex);

	/* Check if the default domain is already direct mapped */
	ret = 0;
	if (group->default_domain &&
	    group->default_domain->type == IOMMU_DOMAIN_IDENTITY)
		goto out;

	/* Don't change mappings of existing devices */
	ret = -EBUSY;
	if (iommu_group_device_count(group) != 1)
		goto out;

	/* Allocate a direct mapped domain */
	ret = -ENOMEM;
	dm_domain = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_IDENTITY);
	if (!dm_domain)
		goto out;

	/* Attach the device to the domain */
	ret = __iommu_attach_group(dm_domain, group);
	if (ret) {
		iommu_domain_free(dm_domain);
		goto out;
	}

	/* Make the direct mapped domain the default for this group */
	if (group->default_domain)
		iommu_domain_free(group->default_domain);
	group->default_domain = dm_domain;

	pr_info("Using direct mapping for device %s\n", dev_name(dev));

	ret = 0;
out:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}
int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;

	if (fwspec)
		return ops == fwspec->ops ? 0 : -EINVAL;

	fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL);
	if (!fwspec)
		return -ENOMEM;

	of_node_get(to_of_node(iommu_fwnode));
	fwspec->iommu_fwnode = iommu_fwnode;
	fwspec->ops = ops;
	dev->iommu_fwspec = fwspec;
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_init);

void iommu_fwspec_free(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;

	if (fwspec) {
		fwnode_handle_put(fwspec->iommu_fwnode);
		kfree(fwspec);
		dev->iommu_fwspec = NULL;
	}
}
EXPORT_SYMBOL_GPL(iommu_fwspec_free);

int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	size_t size;
	int i;

	if (!fwspec)
		return -EINVAL;

	size = offsetof(struct iommu_fwspec, ids[fwspec->num_ids + num_ids]);
	if (size > sizeof(*fwspec)) {
		fwspec = krealloc(dev->iommu_fwspec, size, GFP_KERNEL);
		if (!fwspec)
			return -ENOMEM;
	}

	for (i = 0; i < num_ids; i++)
		fwspec->ids[fwspec->num_ids + i] = ids[i];

	fwspec->num_ids += num_ids;
	dev->iommu_fwspec = fwspec;
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
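/*
 * Editorial example (not part of the original file, compiled out): how
 * firmware bus glue might use the fwspec helpers above when translating one
 * master ID for a device, as the OF/ACPI of_xlate paths do. The function name
 * is hypothetical.
 */
#ifdef IOMMU_API_USAGE_EXAMPLES
static int example_fwspec_attach_id(struct device *dev,
				    struct fwnode_handle *iommu_fwnode,
				    const struct iommu_ops *ops, u32 id)
{
	int ret;

	ret = iommu_fwspec_init(dev, iommu_fwnode, ops);
	if (ret)
		return ret;

	/* Record one stream/requester ID for this master. */
	return iommu_fwspec_add_ids(dev, &id, 1);
}
#endif /* IOMMU_API_USAGE_EXAMPLES */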