]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - include/linux/iommu.h
iommu: add IOMMU_PRIV attribute
[mirror_ubuntu-artful-kernel.git] / include / linux / iommu.h
1 /*
2 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
3 * Author: Joerg Roedel <joerg.roedel@amd.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19 #ifndef __LINUX_IOMMU_H
20 #define __LINUX_IOMMU_H
21
22 #include <linux/errno.h>
23 #include <linux/err.h>
24 #include <linux/of.h>
25 #include <linux/types.h>
26 #include <linux/scatterlist.h>
27 #include <trace/events/iommu.h>
28
/* Page-table/mapping protection flags passed to iommu_map() and friends */
#define IOMMU_READ	(1 << 0)
#define IOMMU_WRITE	(1 << 1)
#define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC	(1 << 3)
#define IOMMU_MMIO	(1 << 4) /* e.g. things like MSI doorbells */
/*
 * This is to make the IOMMU API setup privileged
 * mappings accessible by the master only at higher
 * privileged execution level and inaccessible at
 * less privileged levels.
 */
#define IOMMU_PRIV	(1 << 5)
41
/* Forward declarations so users of this header need not pull in full types */
struct iommu_ops;
struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct notifier_block;

/* iommu fault flags */
#define IOMMU_FAULT_READ	0x0
#define IOMMU_FAULT_WRITE	0x1

/*
 * Callback invoked by report_iommu_fault() on an IOMMU fault.  Arguments:
 * faulting domain, faulting device, faulting IOVA, IOMMU_FAULT_* flags and
 * the opaque token registered via iommu_set_fault_handler().
 */
typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
			struct device *, unsigned long, int, void *);
55
/**
 * struct iommu_domain_geometry - addressing limits of an IOMMU domain
 * @aperture_start: First address that can be mapped
 * @aperture_end: Last address that can be mapped
 * @force_aperture: DMA only allowed in mappable range?
 */
struct iommu_domain_geometry {
	dma_addr_t aperture_start; /* First address that can be mapped    */
	dma_addr_t aperture_end;   /* Last address that can be mapped     */
	bool force_aperture;       /* DMA only allowed in mappable range? */
};
61
/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING	(1U << 0)  /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API	(1U << 1)  /* Domain for use in DMA-API
					      implementation              */
#define __IOMMU_DOMAIN_PT	(1U << 2)  /* Domain is identity mapped   */

/*
 * These are the possible domain-types
 *
 *	IOMMU_DOMAIN_BLOCKED	- All DMA is blocked, can be used to isolate
 *				  devices
 *	IOMMU_DOMAIN_IDENTITY	- DMA addresses are system physical addresses
 *	IOMMU_DOMAIN_UNMANAGED	- DMA mappings managed by IOMMU-API user, used
 *				  for VMs
 *	IOMMU_DOMAIN_DMA	- Internally used for DMA-API implementations.
 *				  This flag allows IOMMU drivers to implement
 *				  certain optimizations for these domains
 */
#define IOMMU_DOMAIN_BLOCKED	(0U)
#define IOMMU_DOMAIN_IDENTITY	(__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED	(__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API)
85
struct iommu_domain {
	unsigned type;			/* IOMMU_DOMAIN_* value */
	const struct iommu_ops *ops;	/* driver callbacks backing this domain */
	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	iommu_fault_handler_t handler;	/* optional; see iommu_set_fault_handler() */
	void *handler_token;		/* passed back verbatim to @handler */
	struct iommu_domain_geometry geometry;
	void *iova_cookie;		/* opaque; not interpreted by this header */
};
95
/* Capabilities that can be queried through iommu_capable() */
enum iommu_cap {
	IOMMU_CAP_CACHE_COHERENCY,	/* IOMMU can enforce cache coherent DMA
					   transactions */
	IOMMU_CAP_INTR_REMAP,		/* IOMMU supports interrupt isolation */
	IOMMU_CAP_NOEXEC,		/* IOMMU_NOEXEC flag */
};
102
/*
 * Following constraints are specific to FSL_PAMUV1:
 *  -aperture must be power of 2, and naturally aligned
 *  -number of windows must be power of 2, and address space size
 *   of each window is determined by aperture size / # of windows
 *  -the actual size of the mapped region of a window must be power
 *   of 2 starting with 4KB and physical address must be naturally
 *   aligned.
 * DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above mentioned constraints.
 * The caller can invoke iommu_domain_get_attr to check if the underlying
 * iommu implementation supports these constraints.
 */

/* Attribute keys for iommu_domain_get_attr()/iommu_domain_set_attr() */
enum iommu_attr {
	DOMAIN_ATTR_GEOMETRY,
	DOMAIN_ATTR_PAGING,
	DOMAIN_ATTR_WINDOWS,
	DOMAIN_ATTR_FSL_PAMU_STASH,
	DOMAIN_ATTR_FSL_PAMU_ENABLE,
	DOMAIN_ATTR_FSL_PAMUV1,
	DOMAIN_ATTR_NESTING,	/* two stages of translation */
	DOMAIN_ATTR_MAX,
};
126
/**
 * struct iommu_dm_region - descriptor for a direct mapped memory region
 * @list: Linked list pointers
 * @start: System physical start address of the region
 * @length: Length of the region in bytes
 * @prot: IOMMU Protection flags (READ/WRITE/...)
 */
struct iommu_dm_region {
	struct list_head	list;
	phys_addr_t		start;
	size_t			length;
	int			prot;
};
140
141 #ifdef CONFIG_IOMMU_API
142
/**
 * struct iommu_ops - iommu ops and capabilities
 * @capable: check capability
 * @domain_alloc: allocate iommu domain
 * @domain_free: free iommu domain
 * @attach_dev: attach device to an iommu domain
 * @detach_dev: detach device from an iommu domain
 * @map: map a physically contiguous memory region to an iommu domain
 * @unmap: unmap a physically contiguous memory region from an iommu domain
 * @map_sg: map a scatter-gather list of physically contiguous memory chunks
 *          to an iommu domain
 * @iova_to_phys: translate iova to physical address
 * @add_device: add device to iommu grouping
 * @remove_device: remove device from iommu grouping
 * @device_group: find iommu group for a particular device
 * @domain_get_attr: Query domain attributes
 * @domain_set_attr: Change domain attributes
 * @get_dm_regions: Request list of direct mapping requirements for a device
 * @put_dm_regions: Free list of direct mapping requirements for a device
 * @apply_dm_region: Temporary helper call-back for iova reserved ranges
 * @domain_window_enable: Configure and enable a particular window for a domain
 * @domain_window_disable: Disable a particular window for a domain
 * @domain_set_windows: Set the number of windows for a domain
 * @domain_get_windows: Return the number of windows for a domain
 * @of_xlate: add OF master IDs to iommu grouping
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 */
struct iommu_ops {
	bool (*capable)(enum iommu_cap);

	/* Domain allocation and freeing by the iommu driver */
	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
	void (*domain_free)(struct iommu_domain *);

	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
	void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*map)(struct iommu_domain *domain, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot);
	size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
			size_t size);
	size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova,
			 struct scatterlist *sg, unsigned int nents, int prot);
	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
	int (*add_device)(struct device *dev);
	void (*remove_device)(struct device *dev);
	struct iommu_group *(*device_group)(struct device *dev);
	int (*domain_get_attr)(struct iommu_domain *domain,
			       enum iommu_attr attr, void *data);
	int (*domain_set_attr)(struct iommu_domain *domain,
			       enum iommu_attr attr, void *data);

	/* Request/Free a list of direct mapping requirements for a device */
	void (*get_dm_regions)(struct device *dev, struct list_head *list);
	void (*put_dm_regions)(struct device *dev, struct list_head *list);
	void (*apply_dm_region)(struct device *dev, struct iommu_domain *domain,
				struct iommu_dm_region *region);

	/* Window handling functions */
	int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
				    phys_addr_t paddr, u64 size, int prot);
	void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr);
	/* Set the number of windows per domain */
	int (*domain_set_windows)(struct iommu_domain *domain, u32 w_count);
	/* Get the number of windows per domain */
	u32 (*domain_get_windows)(struct iommu_domain *domain);

	int (*of_xlate)(struct device *dev, struct of_phandle_args *args);

	unsigned long pgsize_bitmap;
};
213
/* Events delivered to notifiers registered via iommu_group_register_notifier() */
#define IOMMU_GROUP_NOTIFY_ADD_DEVICE		1 /* Device added */
#define IOMMU_GROUP_NOTIFY_DEL_DEVICE		2 /* Pre Device removed */
#define IOMMU_GROUP_NOTIFY_BIND_DRIVER		3 /* Pre Driver bind */
#define IOMMU_GROUP_NOTIFY_BOUND_DRIVER		4 /* Post Driver bind */
#define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER	5 /* Pre Driver unbind */
#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER	6 /* Post Driver unbind */
220
221 extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
222 extern bool iommu_present(struct bus_type *bus);
223 extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
224 extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
225 extern struct iommu_group *iommu_group_get_by_id(int id);
226 extern void iommu_domain_free(struct iommu_domain *domain);
227 extern int iommu_attach_device(struct iommu_domain *domain,
228 struct device *dev);
229 extern void iommu_detach_device(struct iommu_domain *domain,
230 struct device *dev);
231 extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
232 extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
233 phys_addr_t paddr, size_t size, int prot);
234 extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
235 size_t size);
236 extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
237 struct scatterlist *sg,unsigned int nents,
238 int prot);
239 extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
240 extern void iommu_set_fault_handler(struct iommu_domain *domain,
241 iommu_fault_handler_t handler, void *token);
242
243 extern void iommu_get_dm_regions(struct device *dev, struct list_head *list);
244 extern void iommu_put_dm_regions(struct device *dev, struct list_head *list);
245 extern int iommu_request_dm_for_dev(struct device *dev);
246
247 extern int iommu_attach_group(struct iommu_domain *domain,
248 struct iommu_group *group);
249 extern void iommu_detach_group(struct iommu_domain *domain,
250 struct iommu_group *group);
251 extern struct iommu_group *iommu_group_alloc(void);
252 extern void *iommu_group_get_iommudata(struct iommu_group *group);
253 extern void iommu_group_set_iommudata(struct iommu_group *group,
254 void *iommu_data,
255 void (*release)(void *iommu_data));
256 extern int iommu_group_set_name(struct iommu_group *group, const char *name);
257 extern int iommu_group_add_device(struct iommu_group *group,
258 struct device *dev);
259 extern void iommu_group_remove_device(struct device *dev);
260 extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
261 int (*fn)(struct device *, void *));
262 extern struct iommu_group *iommu_group_get(struct device *dev);
263 extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
264 extern void iommu_group_put(struct iommu_group *group);
265 extern int iommu_group_register_notifier(struct iommu_group *group,
266 struct notifier_block *nb);
267 extern int iommu_group_unregister_notifier(struct iommu_group *group,
268 struct notifier_block *nb);
269 extern int iommu_group_id(struct iommu_group *group);
270 extern struct iommu_group *iommu_group_get_for_dev(struct device *dev);
271 extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);
272
273 extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
274 void *data);
275 extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
276 void *data);
277 struct device *iommu_device_create(struct device *parent, void *drvdata,
278 const struct attribute_group **groups,
279 const char *fmt, ...) __printf(4, 5);
280 void iommu_device_destroy(struct device *dev);
281 int iommu_device_link(struct device *dev, struct device *link);
282 void iommu_device_unlink(struct device *dev, struct device *link);
283
284 /* Window handling function prototypes */
285 extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
286 phys_addr_t offset, u64 size,
287 int prot);
288 extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr);
/**
 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
 * @domain: the iommu domain where the fault has happened
 * @dev: the device where the fault has happened
 * @iova: the faulting address
 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
 *
 * This function should be called by the low-level IOMMU implementations
 * whenever IOMMU faults happen, to allow high-level users, that are
 * interested in such events, to know about them.
 *
 * This event may be useful for several possible use cases:
 * - mere logging of the event
 * - dynamic TLB/PTE loading
 * - if restarting of the faulting device is required
 *
 * Returns 0 on success and an appropriate error code otherwise (if dynamic
 * PTE/TLB loading will one day be supported, implementations will be able
 * to tell whether it succeeded or not according to this return value).
 *
 * Specifically, -ENOSYS is returned if a fault handler isn't installed
 * (though fault handlers can also return -ENOSYS, in case they want to
 * elicit the default behavior of the IOMMU drivers).
 */
static inline int report_iommu_fault(struct iommu_domain *domain,
		struct device *dev, unsigned long iova, int flags)
{
	int ret = -ENOSYS;

	/*
	 * if upper layers showed interest and installed a fault handler,
	 * invoke it.
	 */
	if (domain->handler)
		ret = domain->handler(domain, dev, iova, flags,
				      domain->handler_token);

	/* The trace event fires whether or not a handler was installed. */
	trace_io_page_fault(dev, iova, flags);
	return ret;
}
329
/*
 * Map a scatter-gather list into @domain at @iova by dispatching straight
 * to the domain driver's ->map_sg() callback; forwards its size_t result.
 */
static inline size_t iommu_map_sg(struct iommu_domain *domain,
				  unsigned long iova, struct scatterlist *sg,
				  unsigned int nents, int prot)
{
	return domain->ops->map_sg(domain, iova, sg, nents, prot);
}
336
/* Default ->device_group() implementations usable by IOMMU drivers */
/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
341
/**
 * struct iommu_fwspec - per-device IOMMU instance data
 * @ops: ops for this device's IOMMU
 * @iommu_fwnode: firmware handle for this device's IOMMU
 * @iommu_priv: IOMMU driver private data for this device
 * @num_ids: number of associated device IDs
 * @ids: IDs which this device may present to the IOMMU
 *
 * NOTE(review): @ids looks like a variable-length trailing array sized by
 * @num_ids (declared [1] rather than a C99 flexible array member) —
 * confirm against iommu_fwspec_init()/iommu_fwspec_add_ids().
 */
struct iommu_fwspec {
	const struct iommu_ops	*ops;
	struct fwnode_handle	*iommu_fwnode;
	void			*iommu_priv;
	unsigned int		num_ids;
	u32			ids[1];
};
357
/* Firmware-described IOMMU topology: per-device fwspec setup and teardown */
int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
void iommu_register_instance(struct fwnode_handle *fwnode,
			     const struct iommu_ops *ops);
const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode);
365
366 #else /* CONFIG_IOMMU_API */
367
/* Empty placeholder types so code can still declare pointers to them */
struct iommu_ops {};
struct iommu_group {};
struct iommu_fwspec {};
371
/*
 * Inline no-op stubs for the !CONFIG_IOMMU_API case: predicates report
 * "no IOMMU present", allocators return NULL, and operations fail with
 * -ENODEV, so callers need no #ifdefs of their own.
 */
static inline bool iommu_present(struct bus_type *bus)
{
	return false;
}

static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
	return false;
}

static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return NULL;
}

static inline struct iommu_group *iommu_group_get_by_id(int id)
{
	return NULL;
}

static inline void iommu_domain_free(struct iommu_domain *domain)
{
}

static inline int iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
}

static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	return NULL;
}
411
412 static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
413 phys_addr_t paddr, int gfp_order, int prot)
414 {
415 return -ENODEV;
416 }
417
418 static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
419 int gfp_order)
420 {
421 return -ENODEV;
422 }
423
static inline size_t iommu_map_sg(struct iommu_domain *domain,
				  unsigned long iova, struct scatterlist *sg,
				  unsigned int nents, int prot)
{
	/*
	 * Report 0 bytes mapped to signal failure.  The original returned
	 * -ENODEV, which implicitly converts to a huge size_t value that
	 * callers comparing the result against the requested size would
	 * misread as success.
	 */
	return 0;
}
430
/* Window handling and address translation stubs */
static inline int iommu_domain_window_enable(struct iommu_domain *domain,
					     u32 wnd_nr, phys_addr_t paddr,
					     u64 size, int prot)
{
	return -ENODEV;
}

static inline void iommu_domain_window_disable(struct iommu_domain *domain,
					       u32 wnd_nr)
{
}

/* No translation available without an IOMMU: every IOVA resolves to 0. */
static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	return 0;
}
447
/* Fault-handler registration and direct-mapped region stubs */
static inline void iommu_set_fault_handler(struct iommu_domain *domain,
					   iommu_fault_handler_t handler, void *token)
{
}

static inline void iommu_get_dm_regions(struct device *dev,
					struct list_head *list)
{
}

static inline void iommu_put_dm_regions(struct device *dev,
					struct list_head *list)
{
}

static inline int iommu_request_dm_for_dev(struct device *dev)
{
	return -ENODEV;
}
467
/* Group-management stubs: grouping is unavailable without CONFIG_IOMMU_API */
static inline int iommu_attach_group(struct iommu_domain *domain,
				     struct iommu_group *group)
{
	return -ENODEV;
}

static inline void iommu_detach_group(struct iommu_domain *domain,
				      struct iommu_group *group)
{
}

static inline struct iommu_group *iommu_group_alloc(void)
{
	return ERR_PTR(-ENODEV);
}

static inline void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return NULL;
}

static inline void iommu_group_set_iommudata(struct iommu_group *group,
					     void *iommu_data,
					     void (*release)(void *iommu_data))
{
}

static inline int iommu_group_set_name(struct iommu_group *group,
				       const char *name)
{
	return -ENODEV;
}

static inline int iommu_group_add_device(struct iommu_group *group,
					 struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_group_remove_device(struct device *dev)
{
}

static inline int iommu_group_for_each_dev(struct iommu_group *group,
					   void *data,
					   int (*fn)(struct device *, void *))
{
	return -ENODEV;
}

static inline struct iommu_group *iommu_group_get(struct device *dev)
{
	return NULL;
}

static inline void iommu_group_put(struct iommu_group *group)
{
}
526
static inline int iommu_group_register_notifier(struct iommu_group *group,
						struct notifier_block *nb)
{
	return -ENODEV;
}

/*
 * Unregistering succeeds (returns 0) even though registration always
 * fails, so unconditional cleanup paths keep working.
 */
static inline int iommu_group_unregister_notifier(struct iommu_group *group,
						  struct notifier_block *nb)
{
	return 0;
}

static inline int iommu_group_id(struct iommu_group *group)
{
	return -ENODEV;
}

static inline int iommu_domain_get_attr(struct iommu_domain *domain,
					enum iommu_attr attr, void *data)
{
	return -EINVAL;
}

static inline int iommu_domain_set_attr(struct iommu_domain *domain,
					enum iommu_attr attr, void *data)
{
	return -EINVAL;
}
555
/* Sysfs-device and firmware-spec stubs for the !CONFIG_IOMMU_API case */
static inline struct device *iommu_device_create(struct device *parent,
						 void *drvdata,
						 const struct attribute_group **groups,
						 const char *fmt, ...)
{
	return ERR_PTR(-ENODEV);
}

static inline void iommu_device_destroy(struct device *dev)
{
}

static inline int iommu_device_link(struct device *dev, struct device *link)
{
	return -EINVAL;
}

static inline void iommu_device_unlink(struct device *dev, struct device *link)
{
}

static inline int iommu_fwspec_init(struct device *dev,
				    struct fwnode_handle *iommu_fwnode,
				    const struct iommu_ops *ops)
{
	return -ENODEV;
}

static inline void iommu_fwspec_free(struct device *dev)
{
}

static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
				       int num_ids)
{
	return -ENODEV;
}

static inline void iommu_register_instance(struct fwnode_handle *fwnode,
					   const struct iommu_ops *ops)
{
}

static inline
const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode)
{
	return NULL;
}
604
605 #endif /* CONFIG_IOMMU_API */
606
607 #endif /* __LINUX_IOMMU_H */