/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/ratelimit.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/iommu-helper.h>
#include <linux/iommu.h>
#include <linux/delay.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/export.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <asm/irq_remapping.h>
#include <asm/io_apic.h>
#include <asm/hw_irq.h>
#include <asm/msidef.h>
#include <asm/proto.h>
#include <asm/iommu.h>

#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"
#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
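/*
 * For illustration: the command opcode lives in bits 31:28 of the second
 * command word, so CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY) is roughly
 *
 *	cmd.data[1] |= CMD_INV_DEV_ENTRY << 28;
 *
 * All of the build_*() helpers below rely on this encoding.
 */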
#define LOOP_TIMEOUT	100000
/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define AMD_IOMMU_PGSIZES	(~0xFFFUL)
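/*
 * For illustration: bit n in this mask advertises support for a page size
 * of 2^n bytes, so ~0xFFFUL claims every power-of-two size from 4KiB
 * (bit 12) upwards - i.e. all sizes that are an order of 4KiB, as
 * described above.
 */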
static DEFINE_RWLOCK(amd_iommu_devtable_lock);

/* A list of preallocated protection domains */
static LIST_HEAD(iommu_pd_list);
static DEFINE_SPINLOCK(iommu_pd_list_lock);

/* List of all available dev_data structures */
static LIST_HEAD(dev_data_list);
static DEFINE_SPINLOCK(dev_data_list_lock);

LIST_HEAD(ioapic_map);

/*
 * Domain for untranslated devices - only allocated
 * if iommu=pt passed on kernel cmd line.
 */
static struct protection_domain *pt_domain;

static struct iommu_ops amd_iommu_ops;

static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
int amd_iommu_max_glx_val = -1;

static struct dma_map_ops amd_iommu_dma_ops;
/*
 * general struct to manage commands sent to an IOMMU
 */
struct iommu_cmd {
	u32 data[4];
};

struct kmem_cache *amd_iommu_irq_cache;

static void update_domain(struct protection_domain *domain);
static int __init alloc_passthrough_domain(void);

/****************************************************************************
 ****************************************************************************/
static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
	struct iommu_dev_data *dev_data;
	unsigned long flags;

	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return NULL;

	dev_data->devid = devid;
	atomic_set(&dev_data->bind, 0);

	spin_lock_irqsave(&dev_data_list_lock, flags);
	list_add_tail(&dev_data->dev_data_list, &dev_data_list);
	spin_unlock_irqrestore(&dev_data_list_lock, flags);

	return dev_data;
}
static void free_dev_data(struct iommu_dev_data *dev_data)
{
	unsigned long flags;

	spin_lock_irqsave(&dev_data_list_lock, flags);
	list_del(&dev_data->dev_data_list);
	spin_unlock_irqrestore(&dev_data_list_lock, flags);

	kfree(dev_data);
}
static struct iommu_dev_data *search_dev_data(u16 devid)
{
	struct iommu_dev_data *dev_data;
	unsigned long flags;

	spin_lock_irqsave(&dev_data_list_lock, flags);
	list_for_each_entry(dev_data, &dev_data_list, dev_data_list) {
		if (dev_data->devid == devid)
			goto out_unlock;
	}

	dev_data = NULL;

out_unlock:
	spin_unlock_irqrestore(&dev_data_list_lock, flags);

	return dev_data;
}
static struct iommu_dev_data *find_dev_data(u16 devid)
{
	struct iommu_dev_data *dev_data;

	dev_data = search_dev_data(devid);

	if (dev_data == NULL)
		dev_data = alloc_dev_data(devid);

	return dev_data;
}
static inline u16 get_device_id(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return calc_devid(pdev->bus->number, pdev->devfn);
}
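/*
 * For illustration: the 16-bit requestor ID packs the bus number into the
 * upper byte and devfn into the lower byte, so a device at 02:00.1 yields
 * calc_devid(0x02, PCI_DEVFN(0, 1)) == 0x0201.
 */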
static struct iommu_dev_data *get_dev_data(struct device *dev)
{
	return dev->archdata.iommu;
}
188 static bool pci_iommuv2_capable(struct pci_dev
*pdev
)
190 static const int caps
[] = {
193 PCI_EXT_CAP_ID_PASID
,
197 for (i
= 0; i
< 3; ++i
) {
198 pos
= pci_find_ext_capability(pdev
, caps
[i
]);
206 static bool pdev_pri_erratum(struct pci_dev
*pdev
, u32 erratum
)
208 struct iommu_dev_data
*dev_data
;
210 dev_data
= get_dev_data(&pdev
->dev
);
212 return dev_data
->errata
& (1 << erratum
) ? true : false;
216 * In this function the list of preallocated protection domains is traversed to
217 * find the domain for a specific device
219 static struct dma_ops_domain
*find_protection_domain(u16 devid
)
221 struct dma_ops_domain
*entry
, *ret
= NULL
;
223 u16 alias
= amd_iommu_alias_table
[devid
];
225 if (list_empty(&iommu_pd_list
))
228 spin_lock_irqsave(&iommu_pd_list_lock
, flags
);
230 list_for_each_entry(entry
, &iommu_pd_list
, list
) {
231 if (entry
->target_dev
== devid
||
232 entry
->target_dev
== alias
) {
238 spin_unlock_irqrestore(&iommu_pd_list_lock
, flags
);
244 * This function checks if the driver got a valid device from the caller to
245 * avoid dereferencing invalid pointers.
247 static bool check_device(struct device
*dev
)
251 if (!dev
|| !dev
->dma_mask
)
254 /* No device or no PCI device */
255 if (dev
->bus
!= &pci_bus_type
)
258 devid
= get_device_id(dev
);
260 /* Out of our scope? */
261 if (devid
> amd_iommu_last_bdf
)
264 if (amd_iommu_rlookup_table
[devid
] == NULL
)
270 static void swap_pci_ref(struct pci_dev
**from
, struct pci_dev
*to
)
276 #define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
278 static int iommu_init_device(struct device
*dev
)
280 struct pci_dev
*dma_pdev
, *pdev
= to_pci_dev(dev
);
281 struct iommu_dev_data
*dev_data
;
282 struct iommu_group
*group
;
286 if (dev
->archdata
.iommu
)
289 dev_data
= find_dev_data(get_device_id(dev
));
293 alias
= amd_iommu_alias_table
[dev_data
->devid
];
294 if (alias
!= dev_data
->devid
) {
295 struct iommu_dev_data
*alias_data
;
297 alias_data
= find_dev_data(alias
);
298 if (alias_data
== NULL
) {
299 pr_err("AMD-Vi: Warning: Unhandled device %s\n",
301 free_dev_data(dev_data
);
304 dev_data
->alias_data
= alias_data
;
306 dma_pdev
= pci_get_bus_and_slot(alias
>> 8, alias
& 0xff);
308 dma_pdev
= pci_dev_get(pdev
);
310 /* Account for quirked devices */
311 swap_pci_ref(&dma_pdev
, pci_get_dma_source(dma_pdev
));
314 * If it's a multifunction device that does not support our
315 * required ACS flags, add to the same group as function 0.
317 if (dma_pdev
->multifunction
&&
318 !pci_acs_enabled(dma_pdev
, REQ_ACS_FLAGS
))
319 swap_pci_ref(&dma_pdev
,
320 pci_get_slot(dma_pdev
->bus
,
321 PCI_DEVFN(PCI_SLOT(dma_pdev
->devfn
),
325 * Devices on the root bus go through the iommu. If that's not us,
326 * find the next upstream device and test ACS up to the root bus.
327 * Finding the next device may require skipping virtual buses.
329 while (!pci_is_root_bus(dma_pdev
->bus
)) {
330 struct pci_bus
*bus
= dma_pdev
->bus
;
333 if (!pci_is_root_bus(bus
))
339 if (pci_acs_path_enabled(bus
->self
, NULL
, REQ_ACS_FLAGS
))
342 swap_pci_ref(&dma_pdev
, pci_dev_get(bus
->self
));
346 group
= iommu_group_get(&dma_pdev
->dev
);
347 pci_dev_put(dma_pdev
);
349 group
= iommu_group_alloc();
351 return PTR_ERR(group
);
354 ret
= iommu_group_add_device(group
, dev
);
356 iommu_group_put(group
);
361 if (pci_iommuv2_capable(pdev
)) {
362 struct amd_iommu
*iommu
;
364 iommu
= amd_iommu_rlookup_table
[dev_data
->devid
];
365 dev_data
->iommu_v2
= iommu
->is_iommu_v2
;
368 dev
->archdata
.iommu
= dev_data
;
373 static void iommu_ignore_device(struct device
*dev
)
377 devid
= get_device_id(dev
);
378 alias
= amd_iommu_alias_table
[devid
];
380 memset(&amd_iommu_dev_table
[devid
], 0, sizeof(struct dev_table_entry
));
381 memset(&amd_iommu_dev_table
[alias
], 0, sizeof(struct dev_table_entry
));
383 amd_iommu_rlookup_table
[devid
] = NULL
;
384 amd_iommu_rlookup_table
[alias
] = NULL
;
387 static void iommu_uninit_device(struct device
*dev
)
389 iommu_group_remove_device(dev
);
392 * Nothing to do here - we keep dev_data around for unplugged devices
393 * and reuse it when the device is re-plugged - not doing so would
394 * introduce a ton of races.
398 void __init
amd_iommu_uninit_devices(void)
400 struct iommu_dev_data
*dev_data
, *n
;
401 struct pci_dev
*pdev
= NULL
;
403 for_each_pci_dev(pdev
) {
405 if (!check_device(&pdev
->dev
))
408 iommu_uninit_device(&pdev
->dev
);
411 /* Free all of our dev_data structures */
412 list_for_each_entry_safe(dev_data
, n
, &dev_data_list
, dev_data_list
)
413 free_dev_data(dev_data
);
416 int __init
amd_iommu_init_devices(void)
418 struct pci_dev
*pdev
= NULL
;
421 for_each_pci_dev(pdev
) {
423 if (!check_device(&pdev
->dev
))
426 ret
= iommu_init_device(&pdev
->dev
);
427 if (ret
== -ENOTSUPP
)
428 iommu_ignore_device(&pdev
->dev
);
437 amd_iommu_uninit_devices();
#ifdef CONFIG_AMD_IOMMU_STATS

/*
 * Initialization code for statistics collection
 */

DECLARE_STATS_COUNTER(compl_wait);
DECLARE_STATS_COUNTER(cnt_map_single);
DECLARE_STATS_COUNTER(cnt_unmap_single);
DECLARE_STATS_COUNTER(cnt_map_sg);
DECLARE_STATS_COUNTER(cnt_unmap_sg);
DECLARE_STATS_COUNTER(cnt_alloc_coherent);
DECLARE_STATS_COUNTER(cnt_free_coherent);
DECLARE_STATS_COUNTER(cross_page);
DECLARE_STATS_COUNTER(domain_flush_single);
DECLARE_STATS_COUNTER(domain_flush_all);
DECLARE_STATS_COUNTER(alloced_io_mem);
DECLARE_STATS_COUNTER(total_map_requests);
DECLARE_STATS_COUNTER(complete_ppr);
DECLARE_STATS_COUNTER(invalidate_iotlb);
DECLARE_STATS_COUNTER(invalidate_iotlb_all);
DECLARE_STATS_COUNTER(pri_requests);

static struct dentry *stats_dir;
static struct dentry *de_fflush;

static void amd_iommu_stats_add(struct __iommu_counter *cnt)
{
	if (stats_dir == NULL)
		return;

	cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir,
				       &cnt->value);
}

static void amd_iommu_stats_init(void)
{
	stats_dir = debugfs_create_dir("amd-iommu", NULL);
	if (stats_dir == NULL)
		return;

	de_fflush  = debugfs_create_bool("fullflush", 0444, stats_dir,
					 &amd_iommu_unmap_flush);

	amd_iommu_stats_add(&compl_wait);
	amd_iommu_stats_add(&cnt_map_single);
	amd_iommu_stats_add(&cnt_unmap_single);
	amd_iommu_stats_add(&cnt_map_sg);
	amd_iommu_stats_add(&cnt_unmap_sg);
	amd_iommu_stats_add(&cnt_alloc_coherent);
	amd_iommu_stats_add(&cnt_free_coherent);
	amd_iommu_stats_add(&cross_page);
	amd_iommu_stats_add(&domain_flush_single);
	amd_iommu_stats_add(&domain_flush_all);
	amd_iommu_stats_add(&alloced_io_mem);
	amd_iommu_stats_add(&total_map_requests);
	amd_iommu_stats_add(&complete_ppr);
	amd_iommu_stats_add(&invalidate_iotlb);
	amd_iommu_stats_add(&invalidate_iotlb_all);
	amd_iommu_stats_add(&pri_requests);
}

#endif
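/*
 * For illustration: each DECLARE_STATS_COUNTER(x) above defines a counter
 * that amd_iommu_stats_init() exposes as a u64 file under
 * /sys/kernel/debug/amd-iommu/.  The hot paths then update the counters
 * with something like
 *
 *	INC_STATS_COUNTER(cnt_map_single);
 *	ADD_STATS_COUNTER(alloced_io_mem, size);
 *
 * when CONFIG_AMD_IOMMU_STATS is enabled.
 */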
505 /****************************************************************************
507 * Interrupt handling functions
509 ****************************************************************************/
511 static void dump_dte_entry(u16 devid
)
515 for (i
= 0; i
< 4; ++i
)
516 pr_err("AMD-Vi: DTE[%d]: %016llx\n", i
,
517 amd_iommu_dev_table
[devid
].data
[i
]);
520 static void dump_command(unsigned long phys_addr
)
522 struct iommu_cmd
*cmd
= phys_to_virt(phys_addr
);
525 for (i
= 0; i
< 4; ++i
)
526 pr_err("AMD-Vi: CMD[%d]: %08x\n", i
, cmd
->data
[i
]);
529 static void iommu_print_event(struct amd_iommu
*iommu
, void *__evt
)
531 int type
, devid
, domid
, flags
;
532 volatile u32
*event
= __evt
;
537 type
= (event
[1] >> EVENT_TYPE_SHIFT
) & EVENT_TYPE_MASK
;
538 devid
= (event
[0] >> EVENT_DEVID_SHIFT
) & EVENT_DEVID_MASK
;
539 domid
= (event
[1] >> EVENT_DOMID_SHIFT
) & EVENT_DOMID_MASK
;
540 flags
= (event
[1] >> EVENT_FLAGS_SHIFT
) & EVENT_FLAGS_MASK
;
541 address
= (u64
)(((u64
)event
[3]) << 32) | event
[2];
544 /* Did we hit the erratum? */
545 if (++count
== LOOP_TIMEOUT
) {
546 pr_err("AMD-Vi: No event written to event log\n");
553 printk(KERN_ERR
"AMD-Vi: Event logged [");
556 case EVENT_TYPE_ILL_DEV
:
557 printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
558 "address=0x%016llx flags=0x%04x]\n",
559 PCI_BUS(devid
), PCI_SLOT(devid
), PCI_FUNC(devid
),
561 dump_dte_entry(devid
);
563 case EVENT_TYPE_IO_FAULT
:
564 printk("IO_PAGE_FAULT device=%02x:%02x.%x "
565 "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
566 PCI_BUS(devid
), PCI_SLOT(devid
), PCI_FUNC(devid
),
567 domid
, address
, flags
);
569 case EVENT_TYPE_DEV_TAB_ERR
:
570 printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
571 "address=0x%016llx flags=0x%04x]\n",
572 PCI_BUS(devid
), PCI_SLOT(devid
), PCI_FUNC(devid
),
575 case EVENT_TYPE_PAGE_TAB_ERR
:
576 printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
577 "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
578 PCI_BUS(devid
), PCI_SLOT(devid
), PCI_FUNC(devid
),
579 domid
, address
, flags
);
581 case EVENT_TYPE_ILL_CMD
:
582 printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address
);
583 dump_command(address
);
585 case EVENT_TYPE_CMD_HARD_ERR
:
586 printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
587 "flags=0x%04x]\n", address
, flags
);
589 case EVENT_TYPE_IOTLB_INV_TO
:
590 printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
591 "address=0x%016llx]\n",
592 PCI_BUS(devid
), PCI_SLOT(devid
), PCI_FUNC(devid
),
595 case EVENT_TYPE_INV_DEV_REQ
:
596 printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
597 "address=0x%016llx flags=0x%04x]\n",
598 PCI_BUS(devid
), PCI_SLOT(devid
), PCI_FUNC(devid
),
602 printk(KERN_ERR
"UNKNOWN type=0x%02x]\n", type
);
605 memset(__evt
, 0, 4 * sizeof(u32
));
608 static void iommu_poll_events(struct amd_iommu
*iommu
)
613 spin_lock_irqsave(&iommu
->lock
, flags
);
615 head
= readl(iommu
->mmio_base
+ MMIO_EVT_HEAD_OFFSET
);
616 tail
= readl(iommu
->mmio_base
+ MMIO_EVT_TAIL_OFFSET
);
618 while (head
!= tail
) {
619 iommu_print_event(iommu
, iommu
->evt_buf
+ head
);
620 head
= (head
+ EVENT_ENTRY_SIZE
) % iommu
->evt_buf_size
;
623 writel(head
, iommu
->mmio_base
+ MMIO_EVT_HEAD_OFFSET
);
625 spin_unlock_irqrestore(&iommu
->lock
, flags
);
628 static void iommu_handle_ppr_entry(struct amd_iommu
*iommu
, u64
*raw
)
630 struct amd_iommu_fault fault
;
632 INC_STATS_COUNTER(pri_requests
);
634 if (PPR_REQ_TYPE(raw
[0]) != PPR_REQ_FAULT
) {
635 pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
639 fault
.address
= raw
[1];
640 fault
.pasid
= PPR_PASID(raw
[0]);
641 fault
.device_id
= PPR_DEVID(raw
[0]);
642 fault
.tag
= PPR_TAG(raw
[0]);
643 fault
.flags
= PPR_FLAGS(raw
[0]);
645 atomic_notifier_call_chain(&ppr_notifier
, 0, &fault
);
648 static void iommu_poll_ppr_log(struct amd_iommu
*iommu
)
653 if (iommu
->ppr_log
== NULL
)
656 /* enable ppr interrupts again */
657 writel(MMIO_STATUS_PPR_INT_MASK
, iommu
->mmio_base
+ MMIO_STATUS_OFFSET
);
659 spin_lock_irqsave(&iommu
->lock
, flags
);
661 head
= readl(iommu
->mmio_base
+ MMIO_PPR_HEAD_OFFSET
);
662 tail
= readl(iommu
->mmio_base
+ MMIO_PPR_TAIL_OFFSET
);
664 while (head
!= tail
) {
669 raw
= (u64
*)(iommu
->ppr_log
+ head
);
672 * Hardware bug: Interrupt may arrive before the entry is
673 * written to memory. If this happens we need to wait for the
676 for (i
= 0; i
< LOOP_TIMEOUT
; ++i
) {
677 if (PPR_REQ_TYPE(raw
[0]) != 0)
682 /* Avoid memcpy function-call overhead */
687 * To detect the hardware bug we need to clear the entry
690 raw
[0] = raw
[1] = 0UL;
692 /* Update head pointer of hardware ring-buffer */
693 head
= (head
+ PPR_ENTRY_SIZE
) % PPR_LOG_SIZE
;
694 writel(head
, iommu
->mmio_base
+ MMIO_PPR_HEAD_OFFSET
);
697 * Release iommu->lock because ppr-handling might need to
700 spin_unlock_irqrestore(&iommu
->lock
, flags
);
702 /* Handle PPR entry */
703 iommu_handle_ppr_entry(iommu
, entry
);
705 spin_lock_irqsave(&iommu
->lock
, flags
);
707 /* Refresh ring-buffer information */
708 head
= readl(iommu
->mmio_base
+ MMIO_PPR_HEAD_OFFSET
);
709 tail
= readl(iommu
->mmio_base
+ MMIO_PPR_TAIL_OFFSET
);
712 spin_unlock_irqrestore(&iommu
->lock
, flags
);
715 irqreturn_t
amd_iommu_int_thread(int irq
, void *data
)
717 struct amd_iommu
*iommu
;
719 for_each_iommu(iommu
) {
720 iommu_poll_events(iommu
);
721 iommu_poll_ppr_log(iommu
);
727 irqreturn_t
amd_iommu_int_handler(int irq
, void *data
)
729 return IRQ_WAKE_THREAD
;
732 /****************************************************************************
734 * IOMMU command queuing functions
736 ****************************************************************************/
738 static int wait_on_sem(volatile u64
*sem
)
742 while (*sem
== 0 && i
< LOOP_TIMEOUT
) {
747 if (i
== LOOP_TIMEOUT
) {
748 pr_alert("AMD-Vi: Completion-Wait loop timed out\n");
755 static void copy_cmd_to_buffer(struct amd_iommu
*iommu
,
756 struct iommu_cmd
*cmd
,
761 target
= iommu
->cmd_buf
+ tail
;
762 tail
= (tail
+ sizeof(*cmd
)) % iommu
->cmd_buf_size
;
764 /* Copy command to buffer */
765 memcpy(target
, cmd
, sizeof(*cmd
));
767 /* Tell the IOMMU about it */
768 writel(tail
, iommu
->mmio_base
+ MMIO_CMD_TAIL_OFFSET
);
771 static void build_completion_wait(struct iommu_cmd
*cmd
, u64 address
)
773 WARN_ON(address
& 0x7ULL
);
775 memset(cmd
, 0, sizeof(*cmd
));
776 cmd
->data
[0] = lower_32_bits(__pa(address
)) | CMD_COMPL_WAIT_STORE_MASK
;
777 cmd
->data
[1] = upper_32_bits(__pa(address
));
779 CMD_SET_TYPE(cmd
, CMD_COMPL_WAIT
);
782 static void build_inv_dte(struct iommu_cmd
*cmd
, u16 devid
)
784 memset(cmd
, 0, sizeof(*cmd
));
785 cmd
->data
[0] = devid
;
786 CMD_SET_TYPE(cmd
, CMD_INV_DEV_ENTRY
);
789 static void build_inv_iommu_pages(struct iommu_cmd
*cmd
, u64 address
,
790 size_t size
, u16 domid
, int pde
)
795 pages
= iommu_num_pages(address
, size
, PAGE_SIZE
);
800 * If we have to flush more than one page, flush all
801 * TLB entries for this domain
803 address
= CMD_INV_IOMMU_ALL_PAGES_ADDRESS
;
807 address
&= PAGE_MASK
;
809 memset(cmd
, 0, sizeof(*cmd
));
810 cmd
->data
[1] |= domid
;
811 cmd
->data
[2] = lower_32_bits(address
);
812 cmd
->data
[3] = upper_32_bits(address
);
813 CMD_SET_TYPE(cmd
, CMD_INV_IOMMU_PAGES
);
	if (s) /* size bit - we flush more than one 4kb page */
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
	if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
820 static void build_inv_iotlb_pages(struct iommu_cmd
*cmd
, u16 devid
, int qdep
,
821 u64 address
, size_t size
)
826 pages
= iommu_num_pages(address
, size
, PAGE_SIZE
);
831 * If we have to flush more than one page, flush all
832 * TLB entries for this domain
834 address
= CMD_INV_IOMMU_ALL_PAGES_ADDRESS
;
838 address
&= PAGE_MASK
;
840 memset(cmd
, 0, sizeof(*cmd
));
841 cmd
->data
[0] = devid
;
842 cmd
->data
[0] |= (qdep
& 0xff) << 24;
843 cmd
->data
[1] = devid
;
844 cmd
->data
[2] = lower_32_bits(address
);
845 cmd
->data
[3] = upper_32_bits(address
);
846 CMD_SET_TYPE(cmd
, CMD_INV_IOTLB_PAGES
);
848 cmd
->data
[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK
;
851 static void build_inv_iommu_pasid(struct iommu_cmd
*cmd
, u16 domid
, int pasid
,
852 u64 address
, bool size
)
854 memset(cmd
, 0, sizeof(*cmd
));
856 address
&= ~(0xfffULL
);
858 cmd
->data
[0] = pasid
& PASID_MASK
;
859 cmd
->data
[1] = domid
;
860 cmd
->data
[2] = lower_32_bits(address
);
861 cmd
->data
[3] = upper_32_bits(address
);
862 cmd
->data
[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK
;
863 cmd
->data
[2] |= CMD_INV_IOMMU_PAGES_GN_MASK
;
865 cmd
->data
[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK
;
866 CMD_SET_TYPE(cmd
, CMD_INV_IOMMU_PAGES
);
869 static void build_inv_iotlb_pasid(struct iommu_cmd
*cmd
, u16 devid
, int pasid
,
870 int qdep
, u64 address
, bool size
)
872 memset(cmd
, 0, sizeof(*cmd
));
874 address
&= ~(0xfffULL
);
876 cmd
->data
[0] = devid
;
877 cmd
->data
[0] |= (pasid
& 0xff) << 16;
878 cmd
->data
[0] |= (qdep
& 0xff) << 24;
879 cmd
->data
[1] = devid
;
880 cmd
->data
[1] |= ((pasid
>> 8) & 0xfff) << 16;
881 cmd
->data
[2] = lower_32_bits(address
);
882 cmd
->data
[2] |= CMD_INV_IOMMU_PAGES_GN_MASK
;
883 cmd
->data
[3] = upper_32_bits(address
);
885 cmd
->data
[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK
;
886 CMD_SET_TYPE(cmd
, CMD_INV_IOTLB_PAGES
);
889 static void build_complete_ppr(struct iommu_cmd
*cmd
, u16 devid
, int pasid
,
890 int status
, int tag
, bool gn
)
892 memset(cmd
, 0, sizeof(*cmd
));
894 cmd
->data
[0] = devid
;
896 cmd
->data
[1] = pasid
& PASID_MASK
;
897 cmd
->data
[2] = CMD_INV_IOMMU_PAGES_GN_MASK
;
899 cmd
->data
[3] = tag
& 0x1ff;
900 cmd
->data
[3] |= (status
& PPR_STATUS_MASK
) << PPR_STATUS_SHIFT
;
902 CMD_SET_TYPE(cmd
, CMD_COMPLETE_PPR
);
905 static void build_inv_all(struct iommu_cmd
*cmd
)
907 memset(cmd
, 0, sizeof(*cmd
));
908 CMD_SET_TYPE(cmd
, CMD_INV_ALL
);
911 static void build_inv_irt(struct iommu_cmd
*cmd
, u16 devid
)
913 memset(cmd
, 0, sizeof(*cmd
));
914 cmd
->data
[0] = devid
;
915 CMD_SET_TYPE(cmd
, CMD_INV_IRT
);
 * Writes the command to the IOMMU's command buffer and informs the
 * hardware about the new command.
922 static int iommu_queue_command_sync(struct amd_iommu
*iommu
,
923 struct iommu_cmd
*cmd
,
926 u32 left
, tail
, head
, next_tail
;
929 WARN_ON(iommu
->cmd_buf_size
& CMD_BUFFER_UNINITIALIZED
);
932 spin_lock_irqsave(&iommu
->lock
, flags
);
934 head
= readl(iommu
->mmio_base
+ MMIO_CMD_HEAD_OFFSET
);
935 tail
= readl(iommu
->mmio_base
+ MMIO_CMD_TAIL_OFFSET
);
936 next_tail
= (tail
+ sizeof(*cmd
)) % iommu
->cmd_buf_size
;
937 left
= (head
- next_tail
) % iommu
->cmd_buf_size
;
940 struct iommu_cmd sync_cmd
;
941 volatile u64 sem
= 0;
944 build_completion_wait(&sync_cmd
, (u64
)&sem
);
945 copy_cmd_to_buffer(iommu
, &sync_cmd
, tail
);
947 spin_unlock_irqrestore(&iommu
->lock
, flags
);
949 if ((ret
= wait_on_sem(&sem
)) != 0)
955 copy_cmd_to_buffer(iommu
, cmd
, tail
);
957 /* We need to sync now to make sure all commands are processed */
958 iommu
->need_sync
= sync
;
960 spin_unlock_irqrestore(&iommu
->lock
, flags
);
965 static int iommu_queue_command(struct amd_iommu
*iommu
, struct iommu_cmd
*cmd
)
967 return iommu_queue_command_sync(iommu
, cmd
, true);
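/*
 * For illustration, a typical caller builds a command on the stack and
 * queues it like this (see e.g. iommu_flush_dte() below):
 *
 *	struct iommu_cmd cmd;
 *
 *	build_inv_dte(&cmd, devid);
 *	iommu_queue_command(iommu, &cmd);
 *	iommu_completion_wait(iommu);
 */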
971 * This function queues a completion wait command into the command
974 static int iommu_completion_wait(struct amd_iommu
*iommu
)
976 struct iommu_cmd cmd
;
977 volatile u64 sem
= 0;
980 if (!iommu
->need_sync
)
983 build_completion_wait(&cmd
, (u64
)&sem
);
985 ret
= iommu_queue_command_sync(iommu
, &cmd
, false);
989 return wait_on_sem(&sem
);
992 static int iommu_flush_dte(struct amd_iommu
*iommu
, u16 devid
)
994 struct iommu_cmd cmd
;
996 build_inv_dte(&cmd
, devid
);
998 return iommu_queue_command(iommu
, &cmd
);
1001 static void iommu_flush_dte_all(struct amd_iommu
*iommu
)
1005 for (devid
= 0; devid
<= 0xffff; ++devid
)
1006 iommu_flush_dte(iommu
, devid
);
1008 iommu_completion_wait(iommu
);
1012 * This function uses heavy locking and may disable irqs for some time. But
1013 * this is no issue because it is only called during resume.
1015 static void iommu_flush_tlb_all(struct amd_iommu
*iommu
)
1019 for (dom_id
= 0; dom_id
<= 0xffff; ++dom_id
) {
1020 struct iommu_cmd cmd
;
1021 build_inv_iommu_pages(&cmd
, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS
,
1023 iommu_queue_command(iommu
, &cmd
);
1026 iommu_completion_wait(iommu
);
1029 static void iommu_flush_all(struct amd_iommu
*iommu
)
1031 struct iommu_cmd cmd
;
1033 build_inv_all(&cmd
);
1035 iommu_queue_command(iommu
, &cmd
);
1036 iommu_completion_wait(iommu
);
1039 static void iommu_flush_irt(struct amd_iommu
*iommu
, u16 devid
)
1041 struct iommu_cmd cmd
;
1043 build_inv_irt(&cmd
, devid
);
1045 iommu_queue_command(iommu
, &cmd
);
1048 static void iommu_flush_irt_all(struct amd_iommu
*iommu
)
1052 for (devid
= 0; devid
<= MAX_DEV_TABLE_ENTRIES
; devid
++)
1053 iommu_flush_irt(iommu
, devid
);
1055 iommu_completion_wait(iommu
);
1058 void iommu_flush_all_caches(struct amd_iommu
*iommu
)
1060 if (iommu_feature(iommu
, FEATURE_IA
)) {
1061 iommu_flush_all(iommu
);
1063 iommu_flush_dte_all(iommu
);
1064 iommu_flush_irt_all(iommu
);
1065 iommu_flush_tlb_all(iommu
);
1070 * Command send function for flushing on-device TLB
1072 static int device_flush_iotlb(struct iommu_dev_data
*dev_data
,
1073 u64 address
, size_t size
)
1075 struct amd_iommu
*iommu
;
1076 struct iommu_cmd cmd
;
1079 qdep
= dev_data
->ats
.qdep
;
1080 iommu
= amd_iommu_rlookup_table
[dev_data
->devid
];
1082 build_inv_iotlb_pages(&cmd
, dev_data
->devid
, qdep
, address
, size
);
1084 return iommu_queue_command(iommu
, &cmd
);
1088 * Command send function for invalidating a device table entry
1090 static int device_flush_dte(struct iommu_dev_data
*dev_data
)
1092 struct amd_iommu
*iommu
;
1095 iommu
= amd_iommu_rlookup_table
[dev_data
->devid
];
1097 ret
= iommu_flush_dte(iommu
, dev_data
->devid
);
1101 if (dev_data
->ats
.enabled
)
1102 ret
= device_flush_iotlb(dev_data
, 0, ~0UL);
1108 * TLB invalidation function which is called from the mapping functions.
1109 * It invalidates a single PTE if the range to flush is within a single
1110 * page. Otherwise it flushes the whole TLB of the IOMMU.
1112 static void __domain_flush_pages(struct protection_domain
*domain
,
1113 u64 address
, size_t size
, int pde
)
1115 struct iommu_dev_data
*dev_data
;
1116 struct iommu_cmd cmd
;
1119 build_inv_iommu_pages(&cmd
, address
, size
, domain
->id
, pde
);
1121 for (i
= 0; i
< amd_iommus_present
; ++i
) {
1122 if (!domain
->dev_iommu
[i
])
1126 * Devices of this domain are behind this IOMMU
1127 * We need a TLB flush
1129 ret
|= iommu_queue_command(amd_iommus
[i
], &cmd
);
1132 list_for_each_entry(dev_data
, &domain
->dev_list
, list
) {
1134 if (!dev_data
->ats
.enabled
)
1137 ret
|= device_flush_iotlb(dev_data
, address
, size
);
1143 static void domain_flush_pages(struct protection_domain
*domain
,
1144 u64 address
, size_t size
)
1146 __domain_flush_pages(domain
, address
, size
, 0);
1149 /* Flush the whole IO/TLB for a given protection domain */
1150 static void domain_flush_tlb(struct protection_domain
*domain
)
1152 __domain_flush_pages(domain
, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS
, 0);
1155 /* Flush the whole IO/TLB for a given protection domain - including PDE */
1156 static void domain_flush_tlb_pde(struct protection_domain
*domain
)
1158 __domain_flush_pages(domain
, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS
, 1);
1161 static void domain_flush_complete(struct protection_domain
*domain
)
1165 for (i
= 0; i
< amd_iommus_present
; ++i
) {
1166 if (!domain
->dev_iommu
[i
])
1170 * Devices of this domain are behind this IOMMU
1171 * We need to wait for completion of all commands.
1173 iommu_completion_wait(amd_iommus
[i
]);
 * This function flushes the DTEs for all devices in the domain
1181 static void domain_flush_devices(struct protection_domain
*domain
)
1183 struct iommu_dev_data
*dev_data
;
1185 list_for_each_entry(dev_data
, &domain
->dev_list
, list
)
1186 device_flush_dte(dev_data
);
/****************************************************************************
 *
 * The functions below are used to create the page table mappings for
 * unity mapped regions.
 *
 ****************************************************************************/
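/*
 * For illustration (assuming the usual 4KiB pages and 512-entry tables):
 * a DMA address is resolved by taking one 9-bit index per level, e.g. for
 * a 3-level table
 *
 *	PM_LEVEL_INDEX(2, addr) = (addr >> 30) & 0x1ff
 *	PM_LEVEL_INDEX(1, addr) = (addr >> 21) & 0x1ff
 *	PM_LEVEL_INDEX(0, addr) = (addr >> 12) & 0x1ff
 *
 * which is the walk that alloc_pte()/fetch_pte() below perform.
 */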
1197 * This function is used to add another level to an IO page table. Adding
1198 * another level increases the size of the address space by 9 bits to a size up
1201 static bool increase_address_space(struct protection_domain
*domain
,
1206 if (domain
->mode
== PAGE_MODE_6_LEVEL
)
1207 /* address space already 64 bit large */
1210 pte
= (void *)get_zeroed_page(gfp
);
1214 *pte
= PM_LEVEL_PDE(domain
->mode
,
1215 virt_to_phys(domain
->pt_root
));
1216 domain
->pt_root
= pte
;
1218 domain
->updated
= true;
1223 static u64
*alloc_pte(struct protection_domain
*domain
,
1224 unsigned long address
,
1225 unsigned long page_size
,
1232 BUG_ON(!is_power_of_2(page_size
));
1234 while (address
> PM_LEVEL_SIZE(domain
->mode
))
1235 increase_address_space(domain
, gfp
);
1237 level
= domain
->mode
- 1;
1238 pte
= &domain
->pt_root
[PM_LEVEL_INDEX(level
, address
)];
1239 address
= PAGE_SIZE_ALIGN(address
, page_size
);
1240 end_lvl
= PAGE_SIZE_LEVEL(page_size
);
1242 while (level
> end_lvl
) {
1243 if (!IOMMU_PTE_PRESENT(*pte
)) {
1244 page
= (u64
*)get_zeroed_page(gfp
);
1247 *pte
= PM_LEVEL_PDE(level
, virt_to_phys(page
));
1250 /* No level skipping support yet */
1251 if (PM_PTE_LEVEL(*pte
) != level
)
1256 pte
= IOMMU_PTE_PAGE(*pte
);
1258 if (pte_page
&& level
== end_lvl
)
1261 pte
= &pte
[PM_LEVEL_INDEX(level
, address
)];
1268 * This function checks if there is a PTE for a given dma address. If
1269 * there is one, it returns the pointer to it.
1271 static u64
*fetch_pte(struct protection_domain
*domain
, unsigned long address
)
1276 if (address
> PM_LEVEL_SIZE(domain
->mode
))
1279 level
= domain
->mode
- 1;
1280 pte
= &domain
->pt_root
[PM_LEVEL_INDEX(level
, address
)];
1285 if (!IOMMU_PTE_PRESENT(*pte
))
1289 if (PM_PTE_LEVEL(*pte
) == 0x07) {
1290 unsigned long pte_mask
, __pte
;
1293 * If we have a series of large PTEs, make
1294 * sure to return a pointer to the first one.
1296 pte_mask
= PTE_PAGE_SIZE(*pte
);
1297 pte_mask
= ~((PAGE_SIZE_PTE_COUNT(pte_mask
) << 3) - 1);
1298 __pte
= ((unsigned long)pte
) & pte_mask
;
1300 return (u64
*)__pte
;
1303 /* No level skipping support yet */
1304 if (PM_PTE_LEVEL(*pte
) != level
)
1309 /* Walk to the next level */
1310 pte
= IOMMU_PTE_PAGE(*pte
);
1311 pte
= &pte
[PM_LEVEL_INDEX(level
, address
)];
1318 * Generic mapping functions. It maps a physical address into a DMA
1319 * address space. It allocates the page table pages if necessary.
1320 * In the future it can be extended to a generic mapping function
1321 * supporting all features of AMD IOMMU page tables like level skipping
1322 * and full 64 bit address spaces.
1324 static int iommu_map_page(struct protection_domain
*dom
,
1325 unsigned long bus_addr
,
1326 unsigned long phys_addr
,
1328 unsigned long page_size
)
1333 if (!(prot
& IOMMU_PROT_MASK
))
1336 bus_addr
= PAGE_ALIGN(bus_addr
);
1337 phys_addr
= PAGE_ALIGN(phys_addr
);
1338 count
= PAGE_SIZE_PTE_COUNT(page_size
);
1339 pte
= alloc_pte(dom
, bus_addr
, page_size
, NULL
, GFP_KERNEL
);
1341 for (i
= 0; i
< count
; ++i
)
1342 if (IOMMU_PTE_PRESENT(pte
[i
]))
1345 if (page_size
> PAGE_SIZE
) {
1346 __pte
= PAGE_SIZE_PTE(phys_addr
, page_size
);
1347 __pte
|= PM_LEVEL_ENC(7) | IOMMU_PTE_P
| IOMMU_PTE_FC
;
1349 __pte
= phys_addr
| IOMMU_PTE_P
| IOMMU_PTE_FC
;
1351 if (prot
& IOMMU_PROT_IR
)
1352 __pte
|= IOMMU_PTE_IR
;
1353 if (prot
& IOMMU_PROT_IW
)
1354 __pte
|= IOMMU_PTE_IW
;
1356 for (i
= 0; i
< count
; ++i
)
1364 static unsigned long iommu_unmap_page(struct protection_domain
*dom
,
1365 unsigned long bus_addr
,
1366 unsigned long page_size
)
1368 unsigned long long unmap_size
, unmapped
;
1371 BUG_ON(!is_power_of_2(page_size
));
1375 while (unmapped
< page_size
) {
1377 pte
= fetch_pte(dom
, bus_addr
);
1381 * No PTE for this address
1382 * move forward in 4kb steps
1384 unmap_size
= PAGE_SIZE
;
1385 } else if (PM_PTE_LEVEL(*pte
) == 0) {
1386 /* 4kb PTE found for this address */
1387 unmap_size
= PAGE_SIZE
;
1392 /* Large PTE found which maps this address */
1393 unmap_size
= PTE_PAGE_SIZE(*pte
);
1394 count
= PAGE_SIZE_PTE_COUNT(unmap_size
);
1395 for (i
= 0; i
< count
; i
++)
1399 bus_addr
= (bus_addr
& ~(unmap_size
- 1)) + unmap_size
;
1400 unmapped
+= unmap_size
;
1403 BUG_ON(!is_power_of_2(unmapped
));
1409 * This function checks if a specific unity mapping entry is needed for
1410 * this specific IOMMU.
1412 static int iommu_for_unity_map(struct amd_iommu
*iommu
,
1413 struct unity_map_entry
*entry
)
1417 for (i
= entry
->devid_start
; i
<= entry
->devid_end
; ++i
) {
1418 bdf
= amd_iommu_alias_table
[i
];
1419 if (amd_iommu_rlookup_table
[bdf
] == iommu
)
1427 * This function actually applies the mapping to the page table of the
1430 static int dma_ops_unity_map(struct dma_ops_domain
*dma_dom
,
1431 struct unity_map_entry
*e
)
1436 for (addr
= e
->address_start
; addr
< e
->address_end
;
1437 addr
+= PAGE_SIZE
) {
1438 ret
= iommu_map_page(&dma_dom
->domain
, addr
, addr
, e
->prot
,
1443 * if unity mapping is in aperture range mark the page
1444 * as allocated in the aperture
1446 if (addr
< dma_dom
->aperture_size
)
1447 __set_bit(addr
>> PAGE_SHIFT
,
1448 dma_dom
->aperture
[0]->bitmap
);
1455 * Init the unity mappings for a specific IOMMU in the system
1457 * Basically iterates over all unity mapping entries and applies them to
1458 * the default domain DMA of that IOMMU if necessary.
1460 static int iommu_init_unity_mappings(struct amd_iommu
*iommu
)
1462 struct unity_map_entry
*entry
;
1465 list_for_each_entry(entry
, &amd_iommu_unity_map
, list
) {
1466 if (!iommu_for_unity_map(iommu
, entry
))
1468 ret
= dma_ops_unity_map(iommu
->default_dom
, entry
);
1477 * Inits the unity mappings required for a specific device
1479 static int init_unity_mappings_for_device(struct dma_ops_domain
*dma_dom
,
1482 struct unity_map_entry
*e
;
1485 list_for_each_entry(e
, &amd_iommu_unity_map
, list
) {
1486 if (!(devid
>= e
->devid_start
&& devid
<= e
->devid_end
))
1488 ret
= dma_ops_unity_map(dma_dom
, e
);
/****************************************************************************
 *
 * The next functions belong to the address allocator for the dma_ops
 * interface functions. They work like the allocators in the other IOMMU
 * drivers. It's basically a bitmap which marks the allocated pages in
 * the aperture. Maybe it could be enhanced in the future to a more
 * efficient allocator.
 *
 ****************************************************************************/
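/*
 * For illustration only, not the actual code: each aperture range covers
 * 128 MB and carries a bitmap with one bit per 4KiB page, so allocating N
 * pages is essentially a bitmap search, roughly
 *
 *	page = bitmap_find_next_zero_area(range->bitmap,
 *					  APERTURE_RANGE_PAGES, 0, N, 0);
 *	bitmap_set(range->bitmap, page, N);
 *
 * The real allocator below uses iommu_area_alloc() so that segment
 * boundaries and the device's dma_mask are honoured as well.
 */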
1507 * The address allocator core functions.
1509 * called with domain->lock held
1513 * Used to reserve address ranges in the aperture (e.g. for exclusion
1516 static void dma_ops_reserve_addresses(struct dma_ops_domain
*dom
,
1517 unsigned long start_page
,
1520 unsigned int i
, last_page
= dom
->aperture_size
>> PAGE_SHIFT
;
1522 if (start_page
+ pages
> last_page
)
1523 pages
= last_page
- start_page
;
1525 for (i
= start_page
; i
< start_page
+ pages
; ++i
) {
1526 int index
= i
/ APERTURE_RANGE_PAGES
;
1527 int page
= i
% APERTURE_RANGE_PAGES
;
1528 __set_bit(page
, dom
->aperture
[index
]->bitmap
);
1533 * This function is used to add a new aperture range to an existing
1534 * aperture in case of dma_ops domain allocation or address allocation
1537 static int alloc_new_range(struct dma_ops_domain
*dma_dom
,
1538 bool populate
, gfp_t gfp
)
1540 int index
= dma_dom
->aperture_size
>> APERTURE_RANGE_SHIFT
;
1541 struct amd_iommu
*iommu
;
1542 unsigned long i
, old_size
;
1544 #ifdef CONFIG_IOMMU_STRESS
1548 if (index
>= APERTURE_MAX_RANGES
)
1551 dma_dom
->aperture
[index
] = kzalloc(sizeof(struct aperture_range
), gfp
);
1552 if (!dma_dom
->aperture
[index
])
1555 dma_dom
->aperture
[index
]->bitmap
= (void *)get_zeroed_page(gfp
);
1556 if (!dma_dom
->aperture
[index
]->bitmap
)
1559 dma_dom
->aperture
[index
]->offset
= dma_dom
->aperture_size
;
1562 unsigned long address
= dma_dom
->aperture_size
;
1563 int i
, num_ptes
= APERTURE_RANGE_PAGES
/ 512;
1564 u64
*pte
, *pte_page
;
1566 for (i
= 0; i
< num_ptes
; ++i
) {
1567 pte
= alloc_pte(&dma_dom
->domain
, address
, PAGE_SIZE
,
1572 dma_dom
->aperture
[index
]->pte_pages
[i
] = pte_page
;
1574 address
+= APERTURE_RANGE_SIZE
/ 64;
1578 old_size
= dma_dom
->aperture_size
;
1579 dma_dom
->aperture_size
+= APERTURE_RANGE_SIZE
;
1581 /* Reserve address range used for MSI messages */
1582 if (old_size
< MSI_ADDR_BASE_LO
&&
1583 dma_dom
->aperture_size
> MSI_ADDR_BASE_LO
) {
1584 unsigned long spage
;
1587 pages
= iommu_num_pages(MSI_ADDR_BASE_LO
, 0x10000, PAGE_SIZE
);
1588 spage
= MSI_ADDR_BASE_LO
>> PAGE_SHIFT
;
1590 dma_ops_reserve_addresses(dma_dom
, spage
, pages
);
1593 /* Initialize the exclusion range if necessary */
1594 for_each_iommu(iommu
) {
1595 if (iommu
->exclusion_start
&&
1596 iommu
->exclusion_start
>= dma_dom
->aperture
[index
]->offset
1597 && iommu
->exclusion_start
< dma_dom
->aperture_size
) {
1598 unsigned long startpage
;
1599 int pages
= iommu_num_pages(iommu
->exclusion_start
,
1600 iommu
->exclusion_length
,
1602 startpage
= iommu
->exclusion_start
>> PAGE_SHIFT
;
1603 dma_ops_reserve_addresses(dma_dom
, startpage
, pages
);
1608 * Check for areas already mapped as present in the new aperture
1609 * range and mark those pages as reserved in the allocator. Such
1610 * mappings may already exist as a result of requested unity
1611 * mappings for devices.
1613 for (i
= dma_dom
->aperture
[index
]->offset
;
1614 i
< dma_dom
->aperture_size
;
1616 u64
*pte
= fetch_pte(&dma_dom
->domain
, i
);
1617 if (!pte
|| !IOMMU_PTE_PRESENT(*pte
))
1620 dma_ops_reserve_addresses(dma_dom
, i
>> PAGE_SHIFT
, 1);
1623 update_domain(&dma_dom
->domain
);
1628 update_domain(&dma_dom
->domain
);
1630 free_page((unsigned long)dma_dom
->aperture
[index
]->bitmap
);
1632 kfree(dma_dom
->aperture
[index
]);
1633 dma_dom
->aperture
[index
] = NULL
;
1638 static unsigned long dma_ops_area_alloc(struct device
*dev
,
1639 struct dma_ops_domain
*dom
,
1641 unsigned long align_mask
,
1643 unsigned long start
)
1645 unsigned long next_bit
= dom
->next_address
% APERTURE_RANGE_SIZE
;
1646 int max_index
= dom
->aperture_size
>> APERTURE_RANGE_SHIFT
;
1647 int i
= start
>> APERTURE_RANGE_SHIFT
;
1648 unsigned long boundary_size
;
1649 unsigned long address
= -1;
1650 unsigned long limit
;
1652 next_bit
>>= PAGE_SHIFT
;
1654 boundary_size
= ALIGN(dma_get_seg_boundary(dev
) + 1,
1655 PAGE_SIZE
) >> PAGE_SHIFT
;
1657 for (;i
< max_index
; ++i
) {
1658 unsigned long offset
= dom
->aperture
[i
]->offset
>> PAGE_SHIFT
;
1660 if (dom
->aperture
[i
]->offset
>= dma_mask
)
1663 limit
= iommu_device_max_index(APERTURE_RANGE_PAGES
, offset
,
1664 dma_mask
>> PAGE_SHIFT
);
1666 address
= iommu_area_alloc(dom
->aperture
[i
]->bitmap
,
1667 limit
, next_bit
, pages
, 0,
1668 boundary_size
, align_mask
);
1669 if (address
!= -1) {
1670 address
= dom
->aperture
[i
]->offset
+
1671 (address
<< PAGE_SHIFT
);
1672 dom
->next_address
= address
+ (pages
<< PAGE_SHIFT
);
1682 static unsigned long dma_ops_alloc_addresses(struct device
*dev
,
1683 struct dma_ops_domain
*dom
,
1685 unsigned long align_mask
,
1688 unsigned long address
;
1690 #ifdef CONFIG_IOMMU_STRESS
1691 dom
->next_address
= 0;
1692 dom
->need_flush
= true;
1695 address
= dma_ops_area_alloc(dev
, dom
, pages
, align_mask
,
1696 dma_mask
, dom
->next_address
);
1698 if (address
== -1) {
1699 dom
->next_address
= 0;
1700 address
= dma_ops_area_alloc(dev
, dom
, pages
, align_mask
,
1702 dom
->need_flush
= true;
1705 if (unlikely(address
== -1))
1706 address
= DMA_ERROR_CODE
;
1708 WARN_ON((address
+ (PAGE_SIZE
*pages
)) > dom
->aperture_size
);
1714 * The address free function.
1716 * called with domain->lock held
1718 static void dma_ops_free_addresses(struct dma_ops_domain
*dom
,
1719 unsigned long address
,
1722 unsigned i
= address
>> APERTURE_RANGE_SHIFT
;
1723 struct aperture_range
*range
= dom
->aperture
[i
];
1725 BUG_ON(i
>= APERTURE_MAX_RANGES
|| range
== NULL
);
1727 #ifdef CONFIG_IOMMU_STRESS
1732 if (address
>= dom
->next_address
)
1733 dom
->need_flush
= true;
1735 address
= (address
% APERTURE_RANGE_SIZE
) >> PAGE_SHIFT
;
1737 bitmap_clear(range
->bitmap
, address
, pages
);
/****************************************************************************
 *
 * The next functions belong to the domain allocation. A domain is
 * allocated for every IOMMU as the default domain. If device isolation
 * is enabled, every device gets its own domain. The most important thing
 * about domains is the page table mapping the DMA address space they
 * belong to.
 *
 ****************************************************************************/
1752 * This function adds a protection domain to the global protection domain list
1754 static void add_domain_to_list(struct protection_domain
*domain
)
1756 unsigned long flags
;
1758 spin_lock_irqsave(&amd_iommu_pd_lock
, flags
);
1759 list_add(&domain
->list
, &amd_iommu_pd_list
);
1760 spin_unlock_irqrestore(&amd_iommu_pd_lock
, flags
);
 * This function removes a protection domain from the global
 * protection domain list
1767 static void del_domain_from_list(struct protection_domain
*domain
)
1769 unsigned long flags
;
1771 spin_lock_irqsave(&amd_iommu_pd_lock
, flags
);
1772 list_del(&domain
->list
);
1773 spin_unlock_irqrestore(&amd_iommu_pd_lock
, flags
);
1776 static u16
domain_id_alloc(void)
1778 unsigned long flags
;
1781 write_lock_irqsave(&amd_iommu_devtable_lock
, flags
);
1782 id
= find_first_zero_bit(amd_iommu_pd_alloc_bitmap
, MAX_DOMAIN_ID
);
1784 if (id
> 0 && id
< MAX_DOMAIN_ID
)
1785 __set_bit(id
, amd_iommu_pd_alloc_bitmap
);
1788 write_unlock_irqrestore(&amd_iommu_devtable_lock
, flags
);
1793 static void domain_id_free(int id
)
1795 unsigned long flags
;
1797 write_lock_irqsave(&amd_iommu_devtable_lock
, flags
);
1798 if (id
> 0 && id
< MAX_DOMAIN_ID
)
1799 __clear_bit(id
, amd_iommu_pd_alloc_bitmap
);
1800 write_unlock_irqrestore(&amd_iommu_devtable_lock
, flags
);
1803 static void free_pagetable(struct protection_domain
*domain
)
1808 p1
= domain
->pt_root
;
1813 for (i
= 0; i
< 512; ++i
) {
1814 if (!IOMMU_PTE_PRESENT(p1
[i
]))
1817 p2
= IOMMU_PTE_PAGE(p1
[i
]);
1818 for (j
= 0; j
< 512; ++j
) {
1819 if (!IOMMU_PTE_PRESENT(p2
[j
]))
1821 p3
= IOMMU_PTE_PAGE(p2
[j
]);
1822 free_page((unsigned long)p3
);
1825 free_page((unsigned long)p2
);
1828 free_page((unsigned long)p1
);
1830 domain
->pt_root
= NULL
;
1833 static void free_gcr3_tbl_level1(u64
*tbl
)
1838 for (i
= 0; i
< 512; ++i
) {
1839 if (!(tbl
[i
] & GCR3_VALID
))
1842 ptr
= __va(tbl
[i
] & PAGE_MASK
);
1844 free_page((unsigned long)ptr
);
1848 static void free_gcr3_tbl_level2(u64
*tbl
)
1853 for (i
= 0; i
< 512; ++i
) {
1854 if (!(tbl
[i
] & GCR3_VALID
))
1857 ptr
= __va(tbl
[i
] & PAGE_MASK
);
1859 free_gcr3_tbl_level1(ptr
);
1863 static void free_gcr3_table(struct protection_domain
*domain
)
1865 if (domain
->glx
== 2)
1866 free_gcr3_tbl_level2(domain
->gcr3_tbl
);
1867 else if (domain
->glx
== 1)
1868 free_gcr3_tbl_level1(domain
->gcr3_tbl
);
1869 else if (domain
->glx
!= 0)
1872 free_page((unsigned long)domain
->gcr3_tbl
);
1876 * Free a domain, only used if something went wrong in the
1877 * allocation path and we need to free an already allocated page table
1879 static void dma_ops_domain_free(struct dma_ops_domain
*dom
)
1886 del_domain_from_list(&dom
->domain
);
1888 free_pagetable(&dom
->domain
);
1890 for (i
= 0; i
< APERTURE_MAX_RANGES
; ++i
) {
1891 if (!dom
->aperture
[i
])
1893 free_page((unsigned long)dom
->aperture
[i
]->bitmap
);
1894 kfree(dom
->aperture
[i
]);
1901 * Allocates a new protection domain usable for the dma_ops functions.
1902 * It also initializes the page table and the address allocator data
1903 * structures required for the dma_ops interface
1905 static struct dma_ops_domain
*dma_ops_domain_alloc(void)
1907 struct dma_ops_domain
*dma_dom
;
1909 dma_dom
= kzalloc(sizeof(struct dma_ops_domain
), GFP_KERNEL
);
1913 spin_lock_init(&dma_dom
->domain
.lock
);
1915 dma_dom
->domain
.id
= domain_id_alloc();
1916 if (dma_dom
->domain
.id
== 0)
1918 INIT_LIST_HEAD(&dma_dom
->domain
.dev_list
);
1919 dma_dom
->domain
.mode
= PAGE_MODE_2_LEVEL
;
1920 dma_dom
->domain
.pt_root
= (void *)get_zeroed_page(GFP_KERNEL
);
1921 dma_dom
->domain
.flags
= PD_DMA_OPS_MASK
;
1922 dma_dom
->domain
.priv
= dma_dom
;
1923 if (!dma_dom
->domain
.pt_root
)
1926 dma_dom
->need_flush
= false;
1927 dma_dom
->target_dev
= 0xffff;
1929 add_domain_to_list(&dma_dom
->domain
);
1931 if (alloc_new_range(dma_dom
, true, GFP_KERNEL
))
1935 * mark the first page as allocated so we never return 0 as
1936 * a valid dma-address. So we can use 0 as error value
1938 dma_dom
->aperture
[0]->bitmap
[0] = 1;
1939 dma_dom
->next_address
= 0;
1945 dma_ops_domain_free(dma_dom
);
1951 * little helper function to check whether a given protection domain is a
1954 static bool dma_ops_domain(struct protection_domain
*domain
)
1956 return domain
->flags
& PD_DMA_OPS_MASK
;
1959 static void set_dte_entry(u16 devid
, struct protection_domain
*domain
, bool ats
)
1964 if (domain
->mode
!= PAGE_MODE_NONE
)
1965 pte_root
= virt_to_phys(domain
->pt_root
);
1967 pte_root
|= (domain
->mode
& DEV_ENTRY_MODE_MASK
)
1968 << DEV_ENTRY_MODE_SHIFT
;
1969 pte_root
|= IOMMU_PTE_IR
| IOMMU_PTE_IW
| IOMMU_PTE_P
| IOMMU_PTE_TV
;
1971 flags
= amd_iommu_dev_table
[devid
].data
[1];
1974 flags
|= DTE_FLAG_IOTLB
;
1976 if (domain
->flags
& PD_IOMMUV2_MASK
) {
1977 u64 gcr3
= __pa(domain
->gcr3_tbl
);
1978 u64 glx
= domain
->glx
;
1981 pte_root
|= DTE_FLAG_GV
;
1982 pte_root
|= (glx
& DTE_GLX_MASK
) << DTE_GLX_SHIFT
;
1984 /* First mask out possible old values for GCR3 table */
1985 tmp
= DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B
;
1988 tmp
= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C
;
1991 /* Encode GCR3 table into DTE */
1992 tmp
= DTE_GCR3_VAL_A(gcr3
) << DTE_GCR3_SHIFT_A
;
1995 tmp
= DTE_GCR3_VAL_B(gcr3
) << DTE_GCR3_SHIFT_B
;
1998 tmp
= DTE_GCR3_VAL_C(gcr3
) << DTE_GCR3_SHIFT_C
;
2002 flags
&= ~(0xffffUL
);
2003 flags
|= domain
->id
;
2005 amd_iommu_dev_table
[devid
].data
[1] = flags
;
2006 amd_iommu_dev_table
[devid
].data
[0] = pte_root
;
2009 static void clear_dte_entry(u16 devid
)
2011 /* remove entry from the device table seen by the hardware */
2012 amd_iommu_dev_table
[devid
].data
[0] = IOMMU_PTE_P
| IOMMU_PTE_TV
;
2013 amd_iommu_dev_table
[devid
].data
[1] = 0;
2015 amd_iommu_apply_erratum_63(devid
);
2018 static void do_attach(struct iommu_dev_data
*dev_data
,
2019 struct protection_domain
*domain
)
2021 struct amd_iommu
*iommu
;
2024 iommu
= amd_iommu_rlookup_table
[dev_data
->devid
];
2025 ats
= dev_data
->ats
.enabled
;
2027 /* Update data structures */
2028 dev_data
->domain
= domain
;
2029 list_add(&dev_data
->list
, &domain
->dev_list
);
2030 set_dte_entry(dev_data
->devid
, domain
, ats
);
2032 /* Do reference counting */
2033 domain
->dev_iommu
[iommu
->index
] += 1;
2034 domain
->dev_cnt
+= 1;
2036 /* Flush the DTE entry */
2037 device_flush_dte(dev_data
);
2040 static void do_detach(struct iommu_dev_data
*dev_data
)
2042 struct amd_iommu
*iommu
;
2044 iommu
= amd_iommu_rlookup_table
[dev_data
->devid
];
2046 /* decrease reference counters */
2047 dev_data
->domain
->dev_iommu
[iommu
->index
] -= 1;
2048 dev_data
->domain
->dev_cnt
-= 1;
2050 /* Update data structures */
2051 dev_data
->domain
= NULL
;
2052 list_del(&dev_data
->list
);
2053 clear_dte_entry(dev_data
->devid
);
2055 /* Flush the DTE entry */
2056 device_flush_dte(dev_data
);
 * If a device is not yet associated with a domain, this function
 * assigns it to the given domain and makes it visible to the hardware.
2063 static int __attach_device(struct iommu_dev_data
*dev_data
,
2064 struct protection_domain
*domain
)
2069 spin_lock(&domain
->lock
);
2071 if (dev_data
->alias_data
!= NULL
) {
2072 struct iommu_dev_data
*alias_data
= dev_data
->alias_data
;
2074 /* Some sanity checks */
2076 if (alias_data
->domain
!= NULL
&&
2077 alias_data
->domain
!= domain
)
2080 if (dev_data
->domain
!= NULL
&&
2081 dev_data
->domain
!= domain
)
2084 /* Do real assignment */
2085 if (alias_data
->domain
== NULL
)
2086 do_attach(alias_data
, domain
);
2088 atomic_inc(&alias_data
->bind
);
2091 if (dev_data
->domain
== NULL
)
2092 do_attach(dev_data
, domain
);
2094 atomic_inc(&dev_data
->bind
);
2101 spin_unlock(&domain
->lock
);
2107 static void pdev_iommuv2_disable(struct pci_dev
*pdev
)
2109 pci_disable_ats(pdev
);
2110 pci_disable_pri(pdev
);
2111 pci_disable_pasid(pdev
);
2114 /* FIXME: Change generic reset-function to do the same */
2115 static int pri_reset_while_enabled(struct pci_dev
*pdev
)
2120 pos
= pci_find_ext_capability(pdev
, PCI_EXT_CAP_ID_PRI
);
2124 pci_read_config_word(pdev
, pos
+ PCI_PRI_CTRL
, &control
);
2125 control
|= PCI_PRI_CTRL_RESET
;
2126 pci_write_config_word(pdev
, pos
+ PCI_PRI_CTRL
, control
);
2131 static int pdev_iommuv2_enable(struct pci_dev
*pdev
)
2136 /* FIXME: Hardcode number of outstanding requests for now */
2138 if (pdev_pri_erratum(pdev
, AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE
))
2140 reset_enable
= pdev_pri_erratum(pdev
, AMD_PRI_DEV_ERRATUM_ENABLE_RESET
);
2142 /* Only allow access to user-accessible pages */
2143 ret
= pci_enable_pasid(pdev
, 0);
2147 /* First reset the PRI state of the device */
2148 ret
= pci_reset_pri(pdev
);
2153 ret
= pci_enable_pri(pdev
, reqs
);
2158 ret
= pri_reset_while_enabled(pdev
);
2163 ret
= pci_enable_ats(pdev
, PAGE_SHIFT
);
2170 pci_disable_pri(pdev
);
2171 pci_disable_pasid(pdev
);
2176 /* FIXME: Move this to PCI code */
2177 #define PCI_PRI_TLP_OFF (1 << 15)
2179 static bool pci_pri_tlp_required(struct pci_dev
*pdev
)
2184 pos
= pci_find_ext_capability(pdev
, PCI_EXT_CAP_ID_PRI
);
2188 pci_read_config_word(pdev
, pos
+ PCI_PRI_STATUS
, &status
);
2190 return (status
& PCI_PRI_TLP_OFF
) ? true : false;
 * If a device is not yet associated with a domain, this function
 * assigns it to the given domain and makes it visible to the hardware.
2197 static int attach_device(struct device
*dev
,
2198 struct protection_domain
*domain
)
2200 struct pci_dev
*pdev
= to_pci_dev(dev
);
2201 struct iommu_dev_data
*dev_data
;
2202 unsigned long flags
;
2205 dev_data
= get_dev_data(dev
);
2207 if (domain
->flags
& PD_IOMMUV2_MASK
) {
2208 if (!dev_data
->iommu_v2
|| !dev_data
->passthrough
)
2211 if (pdev_iommuv2_enable(pdev
) != 0)
2214 dev_data
->ats
.enabled
= true;
2215 dev_data
->ats
.qdep
= pci_ats_queue_depth(pdev
);
2216 dev_data
->pri_tlp
= pci_pri_tlp_required(pdev
);
2217 } else if (amd_iommu_iotlb_sup
&&
2218 pci_enable_ats(pdev
, PAGE_SHIFT
) == 0) {
2219 dev_data
->ats
.enabled
= true;
2220 dev_data
->ats
.qdep
= pci_ats_queue_depth(pdev
);
2223 write_lock_irqsave(&amd_iommu_devtable_lock
, flags
);
2224 ret
= __attach_device(dev_data
, domain
);
2225 write_unlock_irqrestore(&amd_iommu_devtable_lock
, flags
);
2228 * We might boot into a crash-kernel here. The crashed kernel
2229 * left the caches in the IOMMU dirty. So we have to flush
2230 * here to evict all dirty stuff.
2232 domain_flush_tlb_pde(domain
);
2238 * Removes a device from a protection domain (unlocked)
2240 static void __detach_device(struct iommu_dev_data
*dev_data
)
2242 struct protection_domain
*domain
;
2243 unsigned long flags
;
2245 BUG_ON(!dev_data
->domain
);
2247 domain
= dev_data
->domain
;
2249 spin_lock_irqsave(&domain
->lock
, flags
);
2251 if (dev_data
->alias_data
!= NULL
) {
2252 struct iommu_dev_data
*alias_data
= dev_data
->alias_data
;
2254 if (atomic_dec_and_test(&alias_data
->bind
))
2255 do_detach(alias_data
);
2258 if (atomic_dec_and_test(&dev_data
->bind
))
2259 do_detach(dev_data
);
2261 spin_unlock_irqrestore(&domain
->lock
, flags
);
2264 * If we run in passthrough mode the device must be assigned to the
2265 * passthrough domain if it is detached from any other domain.
2266 * Make sure we can deassign from the pt_domain itself.
2268 if (dev_data
->passthrough
&&
2269 (dev_data
->domain
== NULL
&& domain
!= pt_domain
))
2270 __attach_device(dev_data
, pt_domain
);
2274 * Removes a device from a protection domain (with devtable_lock held)
2276 static void detach_device(struct device
*dev
)
2278 struct protection_domain
*domain
;
2279 struct iommu_dev_data
*dev_data
;
2280 unsigned long flags
;
2282 dev_data
= get_dev_data(dev
);
2283 domain
= dev_data
->domain
;
2285 /* lock device table */
2286 write_lock_irqsave(&amd_iommu_devtable_lock
, flags
);
2287 __detach_device(dev_data
);
2288 write_unlock_irqrestore(&amd_iommu_devtable_lock
, flags
);
2290 if (domain
->flags
& PD_IOMMUV2_MASK
)
2291 pdev_iommuv2_disable(to_pci_dev(dev
));
2292 else if (dev_data
->ats
.enabled
)
2293 pci_disable_ats(to_pci_dev(dev
));
2295 dev_data
->ats
.enabled
= false;
2299 * Find out the protection domain structure for a given PCI device. This
2300 * will give us the pointer to the page table root for example.
2302 static struct protection_domain
*domain_for_device(struct device
*dev
)
2304 struct iommu_dev_data
*dev_data
;
2305 struct protection_domain
*dom
= NULL
;
2306 unsigned long flags
;
2308 dev_data
= get_dev_data(dev
);
2310 if (dev_data
->domain
)
2311 return dev_data
->domain
;
2313 if (dev_data
->alias_data
!= NULL
) {
2314 struct iommu_dev_data
*alias_data
= dev_data
->alias_data
;
2316 read_lock_irqsave(&amd_iommu_devtable_lock
, flags
);
2317 if (alias_data
->domain
!= NULL
) {
2318 __attach_device(dev_data
, alias_data
->domain
);
2319 dom
= alias_data
->domain
;
2321 read_unlock_irqrestore(&amd_iommu_devtable_lock
, flags
);
2327 static int device_change_notifier(struct notifier_block
*nb
,
2328 unsigned long action
, void *data
)
2330 struct dma_ops_domain
*dma_domain
;
2331 struct protection_domain
*domain
;
2332 struct iommu_dev_data
*dev_data
;
2333 struct device
*dev
= data
;
2334 struct amd_iommu
*iommu
;
2335 unsigned long flags
;
2338 if (!check_device(dev
))
2341 devid
= get_device_id(dev
);
2342 iommu
= amd_iommu_rlookup_table
[devid
];
2343 dev_data
= get_dev_data(dev
);
2346 case BUS_NOTIFY_UNBOUND_DRIVER
:
2348 domain
= domain_for_device(dev
);
2352 if (dev_data
->passthrough
)
2356 case BUS_NOTIFY_ADD_DEVICE
:
2358 iommu_init_device(dev
);
2361 * dev_data is still NULL and
2362 * got initialized in iommu_init_device
2364 dev_data
= get_dev_data(dev
);
2366 if (iommu_pass_through
|| dev_data
->iommu_v2
) {
2367 dev_data
->passthrough
= true;
2368 attach_device(dev
, pt_domain
);
2372 domain
= domain_for_device(dev
);
2374 /* allocate a protection domain if a device is added */
2375 dma_domain
= find_protection_domain(devid
);
2378 dma_domain
= dma_ops_domain_alloc();
2381 dma_domain
->target_dev
= devid
;
2383 spin_lock_irqsave(&iommu_pd_list_lock
, flags
);
2384 list_add_tail(&dma_domain
->list
, &iommu_pd_list
);
2385 spin_unlock_irqrestore(&iommu_pd_list_lock
, flags
);
2387 dev_data
= get_dev_data(dev
);
2389 dev
->archdata
.dma_ops
= &amd_iommu_dma_ops
;
2392 case BUS_NOTIFY_DEL_DEVICE
:
2394 iommu_uninit_device(dev
);
2400 iommu_completion_wait(iommu
);
2406 static struct notifier_block device_nb
= {
2407 .notifier_call
= device_change_notifier
,
2410 void amd_iommu_init_notifier(void)
2412 bus_register_notifier(&pci_bus_type
, &device_nb
);
2415 /*****************************************************************************
2417 * The next functions belong to the dma_ops mapping/unmapping code.
2419 *****************************************************************************/
2422 * In the dma_ops path we only have the struct device. This function
2423 * finds the corresponding IOMMU, the protection domain and the
2424 * requestor id for a given device.
2425 * If the device is not yet associated with a domain this is also done
2428 static struct protection_domain
*get_domain(struct device
*dev
)
2430 struct protection_domain
*domain
;
2431 struct dma_ops_domain
*dma_dom
;
2432 u16 devid
= get_device_id(dev
);
2434 if (!check_device(dev
))
2435 return ERR_PTR(-EINVAL
);
2437 domain
= domain_for_device(dev
);
2438 if (domain
!= NULL
&& !dma_ops_domain(domain
))
2439 return ERR_PTR(-EBUSY
);
	/* Device not bound yet - bind it */
2445 dma_dom
= find_protection_domain(devid
);
2447 dma_dom
= amd_iommu_rlookup_table
[devid
]->default_dom
;
2448 attach_device(dev
, &dma_dom
->domain
);
2449 DUMP_printk("Using protection domain %d for device %s\n",
2450 dma_dom
->domain
.id
, dev_name(dev
));
2452 return &dma_dom
->domain
;
2455 static void update_device_table(struct protection_domain
*domain
)
2457 struct iommu_dev_data
*dev_data
;
2459 list_for_each_entry(dev_data
, &domain
->dev_list
, list
)
2460 set_dte_entry(dev_data
->devid
, domain
, dev_data
->ats
.enabled
);
2463 static void update_domain(struct protection_domain
*domain
)
2465 if (!domain
->updated
)
2468 update_device_table(domain
);
2470 domain_flush_devices(domain
);
2471 domain_flush_tlb_pde(domain
);
2473 domain
->updated
= false;
/*
 * This function fetches the PTE for a given address in the aperture
 */
static u64 *dma_ops_get_pte(struct dma_ops_domain *dom,
			    unsigned long address)
{
	struct aperture_range *aperture;
	u64 *pte, *pte_page;

	aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
	if (!aperture)
		return NULL;

	pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
	if (!pte) {
		pte = alloc_pte(&dom->domain, address, PAGE_SIZE, &pte_page,
				GFP_ATOMIC);
		aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page;
	} else
		pte += PM_LEVEL_INDEX(0, address);

	update_domain(&dom->domain);

	return pte;
}
/*
 * This is the generic map function. It maps one 4kb page at paddr to
 * the given address in the DMA address space for the domain.
 */
static dma_addr_t dma_ops_domain_map(struct dma_ops_domain *dom,
				     unsigned long address,
				     phys_addr_t paddr,
				     int direction)
{
	u64 *pte, __pte;

	WARN_ON(address > dom->aperture_size);

	paddr &= PAGE_MASK;

	pte = dma_ops_get_pte(dom, address);
	if (!pte)
		return DMA_ERROR_CODE;

	__pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;

	if (direction == DMA_TO_DEVICE)
		__pte |= IOMMU_PTE_IR;
	else if (direction == DMA_FROM_DEVICE)
		__pte |= IOMMU_PTE_IW;
	else if (direction == DMA_BIDIRECTIONAL)
		__pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;

	*pte = __pte;

	return (dma_addr_t)address;
}
/*
 * The generic unmapping function for one page in the DMA address space.
 */
static void dma_ops_domain_unmap(struct dma_ops_domain *dom,
				 unsigned long address)
{
	struct aperture_range *aperture;
	u64 *pte;

	if (address >= dom->aperture_size)
		return;

	aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
	if (!aperture)
		return;

	pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
	if (!pte)
		return;

	pte += PM_LEVEL_INDEX(0, address);

	*pte = 0ULL;
}
/*
 * This function contains common code for mapping of a physically
 * contiguous memory region into DMA address space. It is used by all
 * mapping functions provided with this IOMMU driver.
 * Must be called with the domain lock held.
 */
static dma_addr_t __map_single(struct device *dev,
			       struct dma_ops_domain *dma_dom,
			       phys_addr_t paddr,
			       size_t size,
			       int dir,
			       bool align,
			       u64 dma_mask)
{
	dma_addr_t offset = paddr & ~PAGE_MASK;
	dma_addr_t address, start, ret;
	unsigned int pages;
	unsigned long align_mask = 0;
	int i;

	pages = iommu_num_pages(paddr, size, PAGE_SIZE);
	paddr &= PAGE_MASK;

	INC_STATS_COUNTER(total_map_requests);

	if (pages > 1)
		INC_STATS_COUNTER(cross_page);

	if (align)
		align_mask = (1UL << get_order(size)) - 1;

retry:
	address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
					  dma_mask);
	if (unlikely(address == DMA_ERROR_CODE)) {
		/*
		 * setting next_address here will let the address
		 * allocator only scan the new allocated range in the
		 * first run. This is a small optimization.
		 */
		dma_dom->next_address = dma_dom->aperture_size;

		if (alloc_new_range(dma_dom, false, GFP_ATOMIC))
			goto out;

		/*
		 * aperture was successfully enlarged by 128 MB, try
		 * allocation again
		 */
		goto retry;
	}

	start = address;
	for (i = 0; i < pages; ++i) {
		ret = dma_ops_domain_map(dma_dom, start, paddr, dir);
		if (ret == DMA_ERROR_CODE)
			goto out_unmap;

		paddr += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	address += offset;

	ADD_STATS_COUNTER(alloced_io_mem, size);

	if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
		domain_flush_tlb(&dma_dom->domain);
		dma_dom->need_flush = false;
	} else if (unlikely(amd_iommu_np_cache))
		domain_flush_pages(&dma_dom->domain, address, size);

out:
	return address;

out_unmap:

	for (--i; i >= 0; --i) {
		start -= PAGE_SIZE;
		dma_ops_domain_unmap(dma_dom, start);
	}

	dma_ops_free_addresses(dma_dom, address, pages);

	return DMA_ERROR_CODE;
}
/*
 * Does the reverse of the __map_single function. Must be called with
 * the domain lock held too.
 */
static void __unmap_single(struct dma_ops_domain *dma_dom,
			   dma_addr_t dma_addr,
			   size_t size,
			   int dir)
{
	dma_addr_t flush_addr;
	dma_addr_t i, start;
	unsigned int pages;

	if ((dma_addr == DMA_ERROR_CODE) ||
	    (dma_addr + size > dma_dom->aperture_size))
		return;

	flush_addr = dma_addr;
	pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr &= PAGE_MASK;
	start = dma_addr;

	for (i = 0; i < pages; ++i) {
		dma_ops_domain_unmap(dma_dom, start);
		start += PAGE_SIZE;
	}

	SUB_STATS_COUNTER(alloced_io_mem, size);

	dma_ops_free_addresses(dma_dom, dma_addr, pages);

	if (amd_iommu_unmap_flush || dma_dom->need_flush) {
		domain_flush_pages(&dma_dom->domain, flush_addr, size);
		dma_dom->need_flush = false;
	}
}
/*
 * The exported map_single function for dma_ops.
 */
static dma_addr_t map_page(struct device *dev, struct page *page,
			   unsigned long offset, size_t size,
			   enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	unsigned long flags;
	struct protection_domain *domain;
	dma_addr_t addr;
	u64 dma_mask;
	phys_addr_t paddr = page_to_phys(page) + offset;

	INC_STATS_COUNTER(cnt_map_single);

	domain = get_domain(dev);
	if (PTR_ERR(domain) == -EINVAL)
		return (dma_addr_t)paddr;
	else if (IS_ERR(domain))
		return DMA_ERROR_CODE;

	dma_mask = *dev->dma_mask;

	spin_lock_irqsave(&domain->lock, flags);

	addr = __map_single(dev, domain->priv, paddr, size, dir, false,
			    dma_mask);
	if (addr == DMA_ERROR_CODE)
		goto out;

	domain_flush_complete(domain);

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return addr;
}
/*
 * The exported unmap_single function for dma_ops.
 */
static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
		       enum dma_data_direction dir, struct dma_attrs *attrs)
{
	unsigned long flags;
	struct protection_domain *domain;

	INC_STATS_COUNTER(cnt_unmap_single);

	domain = get_domain(dev);
	if (IS_ERR(domain))
		return;

	spin_lock_irqsave(&domain->lock, flags);

	__unmap_single(domain->priv, dma_addr, size, dir);

	domain_flush_complete(domain);

	spin_unlock_irqrestore(&domain->lock, flags);
}
/*
 * This is a special map_sg function which is used if we should map a
 * device which is not handled by an AMD IOMMU in the system.
 */
static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
			   int nelems, int dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sglist, s, nelems, i) {
		s->dma_address = (dma_addr_t)sg_phys(s);
		s->dma_length  = s->length;
	}

	return nelems;
}
/*
 * The exported map_sg function for dma_ops (handles scatter-gather
 * lists).
 */
static int map_sg(struct device *dev, struct scatterlist *sglist,
		  int nelems, enum dma_data_direction dir,
		  struct dma_attrs *attrs)
{
	unsigned long flags;
	struct protection_domain *domain;
	struct scatterlist *s;
	phys_addr_t paddr;
	int mapped_elems = 0;
	u64 dma_mask;
	int i;

	INC_STATS_COUNTER(cnt_map_sg);

	domain = get_domain(dev);
	if (PTR_ERR(domain) == -EINVAL)
		return map_sg_no_iommu(dev, sglist, nelems, dir);
	else if (IS_ERR(domain))
		return 0;

	dma_mask = *dev->dma_mask;

	spin_lock_irqsave(&domain->lock, flags);

	for_each_sg(sglist, s, nelems, i) {
		paddr = sg_phys(s);

		s->dma_address = __map_single(dev, domain->priv,
					      paddr, s->length, dir, false,
					      dma_mask);

		if (s->dma_address) {
			s->dma_length = s->length;
			mapped_elems++;
		} else
			goto unmap;
	}

	domain_flush_complete(domain);

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return mapped_elems;

unmap:
	for_each_sg(sglist, s, mapped_elems, i) {
		if (s->dma_address)
			__unmap_single(domain->priv, s->dma_address,
				       s->dma_length, dir);
		s->dma_address = s->dma_length = 0;
	}

	mapped_elems = 0;

	goto out;
}
/*
 * The exported unmap_sg function for dma_ops (handles scatter-gather
 * lists).
 */
static void unmap_sg(struct device *dev, struct scatterlist *sglist,
		     int nelems, enum dma_data_direction dir,
		     struct dma_attrs *attrs)
{
	unsigned long flags;
	struct protection_domain *domain;
	struct scatterlist *s;
	int i;

	INC_STATS_COUNTER(cnt_unmap_sg);

	domain = get_domain(dev);
	if (IS_ERR(domain))
		return;

	spin_lock_irqsave(&domain->lock, flags);

	for_each_sg(sglist, s, nelems, i) {
		__unmap_single(domain->priv, s->dma_address,
			       s->dma_length, dir);
		s->dma_address = s->dma_length = 0;
	}

	domain_flush_complete(domain);

	spin_unlock_irqrestore(&domain->lock, flags);
}
/*
 * The exported alloc_coherent function for dma_ops.
 */
static void *alloc_coherent(struct device *dev, size_t size,
			    dma_addr_t *dma_addr, gfp_t flag,
			    struct dma_attrs *attrs)
{
	unsigned long flags;
	void *virt_addr;
	struct protection_domain *domain;
	phys_addr_t paddr;
	u64 dma_mask = dev->coherent_dma_mask;

	INC_STATS_COUNTER(cnt_alloc_coherent);

	domain = get_domain(dev);
	if (PTR_ERR(domain) == -EINVAL) {
		virt_addr = (void *)__get_free_pages(flag, get_order(size));
		*dma_addr = __pa(virt_addr);
		return virt_addr;
	} else if (IS_ERR(domain))
		return NULL;

	dma_mask  = dev->coherent_dma_mask;
	flag     &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	virt_addr = (void *)__get_free_pages(flag, get_order(size));
	if (!virt_addr)
		return NULL;

	paddr = virt_to_phys(virt_addr);

	if (!dma_mask)
		dma_mask = *dev->dma_mask;

	spin_lock_irqsave(&domain->lock, flags);

	*dma_addr = __map_single(dev, domain->priv, paddr,
				 size, DMA_BIDIRECTIONAL, true, dma_mask);

	if (*dma_addr == DMA_ERROR_CODE) {
		spin_unlock_irqrestore(&domain->lock, flags);
		goto out_free;
	}

	domain_flush_complete(domain);

	spin_unlock_irqrestore(&domain->lock, flags);

	return virt_addr;

out_free:

	free_pages((unsigned long)virt_addr, get_order(size));

	return NULL;
}
/*
 * The exported free_coherent function for dma_ops.
 */
static void free_coherent(struct device *dev, size_t size,
			  void *virt_addr, dma_addr_t dma_addr,
			  struct dma_attrs *attrs)
{
	unsigned long flags;
	struct protection_domain *domain;

	INC_STATS_COUNTER(cnt_free_coherent);

	domain = get_domain(dev);
	if (IS_ERR(domain))
		goto free_mem;

	spin_lock_irqsave(&domain->lock, flags);

	__unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);

	domain_flush_complete(domain);

	spin_unlock_irqrestore(&domain->lock, flags);

free_mem:
	free_pages((unsigned long)virt_addr, get_order(size));
}
/*
 * This function is called by the DMA layer to find out if we can handle a
 * particular device. It is part of the dma_ops.
 */
static int amd_iommu_dma_supported(struct device *dev, u64 mask)
{
	return check_device(dev);
}
/*
 * The function for pre-allocating protection domains.
 *
 * Once the driver core informs the DMA layer whenever a driver grabs a
 * device, we won't need to preallocate the protection domains anymore.
 * For now we have to.
 */
static void __init prealloc_protection_domains(void)
{
	struct iommu_dev_data *dev_data;
	struct dma_ops_domain *dma_dom;
	struct pci_dev *dev = NULL;
	u16 devid;

	for_each_pci_dev(dev) {

		/* Do we handle this device? */
		if (!check_device(&dev->dev))
			continue;

		dev_data = get_dev_data(&dev->dev);
		if (!amd_iommu_force_isolation && dev_data->iommu_v2) {
			/* Make sure passthrough domain is allocated */
			alloc_passthrough_domain();
			dev_data->passthrough = true;
			attach_device(&dev->dev, pt_domain);
			pr_info("AMD-Vi: Using passthrough domain for device %s\n",
				dev_name(&dev->dev));
			continue;
		}

		/* Is there already any domain for it? */
		if (domain_for_device(&dev->dev))
			continue;

		devid = get_device_id(&dev->dev);

		dma_dom = dma_ops_domain_alloc();
		if (!dma_dom)
			continue;
		init_unity_mappings_for_device(dma_dom, devid);
		dma_dom->target_dev = devid;

		attach_device(&dev->dev, &dma_dom->domain);

		list_add_tail(&dma_dom->list, &iommu_pd_list);
	}
}
static struct dma_map_ops amd_iommu_dma_ops = {
	.alloc		= alloc_coherent,
	.free		= free_coherent,
	.map_page	= map_page,
	.unmap_page	= unmap_page,
	.map_sg		= map_sg,
	.unmap_sg	= unmap_sg,
	.dma_supported	= amd_iommu_dma_supported,
};
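
/*
 * Illustrative sketch (not part of the driver): once these dma_ops are
 * installed for a device, an ordinary driver reaches this code through the
 * generic DMA API. Assuming "dev" is a device handled by the AMD IOMMU and
 * "page" is a page the caller owns, a typical round trip looks like:
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... hardware performs DMA using "dma" ...
 *	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
 *
 * dma_map_page() ends up in map_page() above, which allocates an IOVA range
 * in the device's dma_ops_domain and points it at the physical page;
 * dma_unmap_page() ends up in unmap_page().
 */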
static unsigned device_dma_ops_init(void)
{
	struct iommu_dev_data *dev_data;
	struct pci_dev *pdev = NULL;
	unsigned unhandled = 0;

	for_each_pci_dev(pdev) {
		if (!check_device(&pdev->dev)) {

			iommu_ignore_device(&pdev->dev);

			unhandled += 1;
			continue;
		}

		dev_data = get_dev_data(&pdev->dev);

		if (!dev_data->passthrough)
			pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops;
		else
			pdev->dev.archdata.dma_ops = &nommu_dma_ops;
	}

	return unhandled;
}
/*
 * The function which clues the AMD IOMMU driver into dma_ops.
 */
void __init amd_iommu_init_api(void)
{
	bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
}
int __init amd_iommu_init_dma_ops(void)
{
	struct amd_iommu *iommu;
	int ret, unhandled;

	/*
	 * first allocate a default protection domain for every IOMMU we
	 * found in the system. Devices not assigned to any other
	 * protection domain will be assigned to the default one.
	 */
	for_each_iommu(iommu) {
		iommu->default_dom = dma_ops_domain_alloc();
		if (iommu->default_dom == NULL)
			return -ENOMEM;
		iommu->default_dom->domain.flags |= PD_DEFAULT_MASK;
		ret = iommu_init_unity_mappings(iommu);
		if (ret)
			goto free_domains;
	}

	/*
	 * Pre-allocate the protection domains for each device.
	 */
	prealloc_protection_domains();

	/* Make the driver finally visible to the drivers */
	unhandled = device_dma_ops_init();
	if (unhandled && max_pfn > MAX_DMA32_PFN) {
		/* There are unhandled devices - initialize swiotlb for them */
		swiotlb = 1;
	}

	amd_iommu_stats_init();

	if (amd_iommu_unmap_flush)
		pr_info("AMD-Vi: IO/TLB flush on unmap enabled\n");
	else
		pr_info("AMD-Vi: Lazy IO/TLB flushing enabled\n");

	return 0;

free_domains:

	for_each_iommu(iommu) {
		if (iommu->default_dom)
			dma_ops_domain_free(iommu->default_dom);
	}

	return ret;
}
/*****************************************************************************
 *
 * The following functions belong to the exported interface of AMD IOMMU
 *
 * This interface allows access to lower level functions of the IOMMU
 * like protection domain handling and assignment of devices to domains
 * which is not possible with the dma_ops interface.
 *
 *****************************************************************************/
static void cleanup_domain(struct protection_domain *domain)
{
	struct iommu_dev_data *dev_data, *next;
	unsigned long flags;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);

	list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
		__detach_device(dev_data);
		atomic_set(&dev_data->bind, 0);
	}

	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
}
static void protection_domain_free(struct protection_domain *domain)
{
	if (!domain)
		return;

	del_domain_from_list(domain);

	if (domain->id)
		domain_id_free(domain->id);

	kfree(domain);
}
static struct protection_domain *protection_domain_alloc(void)
{
	struct protection_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	spin_lock_init(&domain->lock);
	mutex_init(&domain->api_lock);
	domain->id = domain_id_alloc();
	if (!domain->id)
		goto out_err;
	INIT_LIST_HEAD(&domain->dev_list);

	add_domain_to_list(domain);

	return domain;

out_err:
	kfree(domain);

	return NULL;
}
static int __init alloc_passthrough_domain(void)
{
	if (pt_domain != NULL)
		return 0;

	/* allocate passthrough domain */
	pt_domain = protection_domain_alloc();
	if (!pt_domain)
		return -ENOMEM;

	pt_domain->mode = PAGE_MODE_NONE;

	return 0;
}
static int amd_iommu_domain_init(struct iommu_domain *dom)
{
	struct protection_domain *domain;

	domain = protection_domain_alloc();
	if (!domain)
		goto out_free;

	domain->mode    = PAGE_MODE_3_LEVEL;
	domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
	if (!domain->pt_root)
		goto out_free;

	domain->iommu_domain = dom;

	dom->priv = domain;

	dom->geometry.aperture_start = 0;
	dom->geometry.aperture_end   = ~0ULL;
	dom->geometry.force_aperture = true;

	return 0;

out_free:
	protection_domain_free(domain);

	return -ENOMEM;
}
static void amd_iommu_domain_destroy(struct iommu_domain *dom)
{
	struct protection_domain *domain = dom->priv;

	if (!domain)
		return;

	if (domain->dev_cnt > 0)
		cleanup_domain(domain);

	BUG_ON(domain->dev_cnt != 0);

	if (domain->mode != PAGE_MODE_NONE)
		free_pagetable(domain);

	if (domain->flags & PD_IOMMUV2_MASK)
		free_gcr3_table(domain);

	protection_domain_free(domain);

	dom->priv = NULL;
}
static void amd_iommu_detach_device(struct iommu_domain *dom,
				    struct device *dev)
{
	struct iommu_dev_data *dev_data = dev->archdata.iommu;
	struct amd_iommu *iommu;
	u16 devid;

	if (!check_device(dev))
		return;

	devid = get_device_id(dev);

	if (dev_data->domain != NULL)
		detach_device(dev);

	iommu = amd_iommu_rlookup_table[devid];
	if (!iommu)
		return;

	iommu_completion_wait(iommu);
}
static int amd_iommu_attach_device(struct iommu_domain *dom,
				   struct device *dev)
{
	struct protection_domain *domain = dom->priv;
	struct iommu_dev_data *dev_data;
	struct amd_iommu *iommu;
	int ret;

	if (!check_device(dev))
		return -EINVAL;

	dev_data = dev->archdata.iommu;

	iommu = amd_iommu_rlookup_table[dev_data->devid];
	if (!iommu)
		return -EINVAL;

	if (dev_data->domain)
		detach_device(dev);

	ret = attach_device(dev, domain);

	iommu_completion_wait(iommu);

	return ret;
}
static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
			 phys_addr_t paddr, size_t page_size, int iommu_prot)
{
	struct protection_domain *domain = dom->priv;
	int prot = 0;
	int ret;

	if (domain->mode == PAGE_MODE_NONE)
		return -EINVAL;

	if (iommu_prot & IOMMU_READ)
		prot |= IOMMU_PROT_IR;
	if (iommu_prot & IOMMU_WRITE)
		prot |= IOMMU_PROT_IW;

	mutex_lock(&domain->api_lock);
	ret = iommu_map_page(domain, iova, paddr, prot, page_size);
	mutex_unlock(&domain->api_lock);

	return ret;
}
static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
			      size_t page_size)
{
	struct protection_domain *domain = dom->priv;
	size_t unmap_size;

	if (domain->mode == PAGE_MODE_NONE)
		return -EINVAL;

	mutex_lock(&domain->api_lock);
	unmap_size = iommu_unmap_page(domain, iova, page_size);
	mutex_unlock(&domain->api_lock);

	domain_flush_tlb_pde(domain);

	return unmap_size;
}
static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
					  unsigned long iova)
{
	struct protection_domain *domain = dom->priv;
	unsigned long offset_mask;
	phys_addr_t paddr;
	u64 *pte, __pte;

	if (domain->mode == PAGE_MODE_NONE)
		return iova;

	pte = fetch_pte(domain, iova);

	if (!pte || !IOMMU_PTE_PRESENT(*pte))
		return 0;

	if (PM_PTE_LEVEL(*pte) == 0)
		offset_mask = PAGE_SIZE - 1;
	else
		offset_mask = PTE_PAGE_SIZE(*pte) - 1;

	__pte = *pte & PM_ADDR_MASK;
	paddr = (__pte & ~offset_mask) | (iova & offset_mask);

	return paddr;
}
static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return 1;
	}

	return 0;
}
static struct iommu_ops amd_iommu_ops = {
	.domain_init	= amd_iommu_domain_init,
	.domain_destroy	= amd_iommu_domain_destroy,
	.attach_dev	= amd_iommu_attach_device,
	.detach_dev	= amd_iommu_detach_device,
	.map		= amd_iommu_map,
	.unmap		= amd_iommu_unmap,
	.iova_to_phys	= amd_iommu_iova_to_phys,
	.domain_has_cap	= amd_iommu_domain_has_cap,
	.pgsize_bitmap	= AMD_IOMMU_PGSIZES,
};
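
/*
 * Illustrative sketch (not part of the driver): these iommu_ops are what a
 * user of the generic IOMMU API ends up in once amd_iommu_init_api() has
 * registered them for the PCI bus. Assuming "dev" is a PCI device behind an
 * AMD IOMMU and "iova"/"paddr" are suitably aligned, a minimal caller could
 * do:
 *
 *	struct iommu_domain *dom;
 *
 *	dom = iommu_domain_alloc(&pci_bus_type);	// amd_iommu_domain_init()
 *	iommu_attach_device(dom, dev);			// amd_iommu_attach_device()
 *	iommu_map(dom, iova, paddr, SZ_4K,
 *		  IOMMU_READ | IOMMU_WRITE);		// amd_iommu_map()
 *	...
 *	iommu_unmap(dom, iova, SZ_4K);			// amd_iommu_unmap()
 *	iommu_detach_device(dom, dev);			// amd_iommu_detach_device()
 *	iommu_domain_free(dom);				// amd_iommu_domain_destroy()
 */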
/*****************************************************************************
 *
 * The next functions do a basic initialization of IOMMU for pass through
 * mode.
 *
 * In passthrough mode the IOMMU is initialized and enabled but not used for
 * DMA-API translation.
 *
 *****************************************************************************/
int __init amd_iommu_init_passthrough(void)
{
	struct iommu_dev_data *dev_data;
	struct pci_dev *dev = NULL;
	struct amd_iommu *iommu;
	u16 devid;
	int ret;

	ret = alloc_passthrough_domain();
	if (ret)
		return ret;

	for_each_pci_dev(dev) {
		if (!check_device(&dev->dev))
			continue;

		dev_data = get_dev_data(&dev->dev);
		dev_data->passthrough = true;

		devid = get_device_id(&dev->dev);

		iommu = amd_iommu_rlookup_table[devid];
		if (!iommu)
			continue;

		attach_device(&dev->dev, pt_domain);
	}

	amd_iommu_stats_init();

	pr_info("AMD-Vi: Initialized for Passthrough Mode\n");

	return 0;
}
/* IOMMUv2 specific functions */
int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&ppr_notifier, nb);
}
EXPORT_SYMBOL(amd_iommu_register_ppr_notifier);

int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&ppr_notifier, nb);
}
EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
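
/*
 * Illustrative sketch (not part of the driver): a PPR consumer such as the
 * amd_iommu_v2 driver registers a notifier_block here; the chain is invoked
 * from the PPR log handling in this file with a pointer to the fault
 * description. A hypothetical user might look like:
 *
 *	static int my_ppr_notifier(struct notifier_block *nb,
 *				   unsigned long e, void *data)
 *	{
 *		struct amd_iommu_fault *fault = data;
 *
 *		pr_debug("PPR fault: devid %x pasid %d addr %llx\n",
 *			 fault->device_id, fault->pasid, fault->address);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_ppr_notifier,
 *	};
 *	...
 *	amd_iommu_register_ppr_notifier(&my_nb);
 */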
void amd_iommu_domain_direct_map(struct iommu_domain *dom)
{
	struct protection_domain *domain = dom->priv;
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);

	/* Update data structure */
	domain->mode    = PAGE_MODE_NONE;
	domain->updated = true;

	/* Make changes visible to IOMMUs */
	update_domain(domain);

	/* Page-table is not visible to IOMMU anymore, so free it */
	free_pagetable(domain);

	spin_unlock_irqrestore(&domain->lock, flags);
}
EXPORT_SYMBOL(amd_iommu_domain_direct_map);
int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
{
	struct protection_domain *domain = dom->priv;
	unsigned long flags;
	int levels, ret;

	if (pasids <= 0 || pasids > (PASID_MASK + 1))
		return -EINVAL;

	/* Number of GCR3 table levels required */
	for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
		levels += 1;

	if (levels > amd_iommu_max_glx_val)
		return -EINVAL;

	spin_lock_irqsave(&domain->lock, flags);

	/*
	 * Save us all sanity checks whether devices already in the
	 * domain support IOMMUv2. Just force that the domain has no
	 * devices attached when it is switched into IOMMUv2 mode.
	 */
	ret = -EBUSY;
	if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK)
		goto out;

	ret = -ENOMEM;
	domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
	if (domain->gcr3_tbl == NULL)
		goto out;

	domain->glx      = levels;
	domain->flags   |= PD_IOMMUV2_MASK;
	domain->updated  = true;

	update_domain(domain);

	ret = 0;

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
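
/*
 * Illustrative sketch (not part of the driver): the order in which a
 * v2-capable user (e.g. the amd_iommu_v2 driver) is expected to use these
 * exports, assuming "dom" is a freshly allocated iommu_domain with no
 * devices attached yet:
 *
 *	amd_iommu_domain_direct_map(dom);            // drop the host page-table
 *	amd_iommu_domain_enable_v2(dom, pasids);     // allocate the GCR3 table
 *	iommu_attach_device(dom, &pdev->dev);
 *	amd_iommu_domain_set_gcr3(dom, pasid, cr3);  // bind a CPU page-table
 *	...
 *	amd_iommu_domain_clear_gcr3(dom, pasid);
 *
 * enable_v2() rejects domains that already have devices attached or that are
 * already in v2 mode, so it has to run before the first attach.
 */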
static int __flush_pasid(struct protection_domain *domain, int pasid,
			 u64 address, bool size)
{
	struct iommu_dev_data *dev_data;
	struct iommu_cmd cmd;
	int i, ret;

	if (!(domain->flags & PD_IOMMUV2_MASK))
		return -EINVAL;

	build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size);

	/*
	 * IOMMU TLB needs to be flushed before Device TLB to
	 * prevent device TLB refill from IOMMU TLB
	 */
	for (i = 0; i < amd_iommus_present; ++i) {
		if (domain->dev_iommu[i] == 0)
			continue;

		ret = iommu_queue_command(amd_iommus[i], &cmd);
		if (ret != 0)
			goto out;
	}

	/* Wait until IOMMU TLB flushes are complete */
	domain_flush_complete(domain);

	/* Now flush device TLBs */
	list_for_each_entry(dev_data, &domain->dev_list, list) {
		struct amd_iommu *iommu;
		int qdep;

		BUG_ON(!dev_data->ats.enabled);

		qdep  = dev_data->ats.qdep;
		iommu = amd_iommu_rlookup_table[dev_data->devid];

		build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
				      qdep, address, size);

		ret = iommu_queue_command(iommu, &cmd);
		if (ret != 0)
			goto out;
	}

	/* Wait until all device TLBs are flushed */
	domain_flush_complete(domain);

	ret = 0;

out:
	return ret;
}
static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
				  u64 address)
{
	INC_STATS_COUNTER(invalidate_iotlb);

	return __flush_pasid(domain, pasid, address, false);
}

int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
			 u64 address)
{
	struct protection_domain *domain = dom->priv;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&domain->lock, flags);
	ret = __amd_iommu_flush_page(domain, pasid, address);
	spin_unlock_irqrestore(&domain->lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_flush_page);
static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
{
	INC_STATS_COUNTER(invalidate_iotlb_all);

	return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
			     true);
}

int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid)
{
	struct protection_domain *domain = dom->priv;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&domain->lock, flags);
	ret = __amd_iommu_flush_tlb(domain, pasid);
	spin_unlock_irqrestore(&domain->lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_flush_tlb);
static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
{
	int index;
	u64 *pte;

	while (true) {

		index = (pasid >> (9 * level)) & 0x1ff;
		pte   = &root[index];

		if (level == 0)
			break;

		if (!(*pte & GCR3_VALID)) {
			if (!alloc)
				return NULL;

			root = (void *)get_zeroed_page(GFP_ATOMIC);
			if (root == NULL)
				return NULL;

			*pte = __pa(root) | GCR3_VALID;
		}

		root = __va(*pte & PAGE_MASK);

		level -= 1;
	}

	return pte;
}
static int __set_gcr3(struct protection_domain *domain, int pasid,
		      unsigned long cr3)
{
	u64 *pte;

	if (domain->mode != PAGE_MODE_NONE)
		return -EINVAL;

	pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
	if (pte == NULL)
		return -ENOMEM;

	*pte = (cr3 & PAGE_MASK) | GCR3_VALID;

	return __amd_iommu_flush_tlb(domain, pasid);
}

static int __clear_gcr3(struct protection_domain *domain, int pasid)
{
	u64 *pte;

	if (domain->mode != PAGE_MODE_NONE)
		return -EINVAL;

	pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
	if (pte == NULL)
		return 0;

	*pte = 0;

	return __amd_iommu_flush_tlb(domain, pasid);
}
int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
			      unsigned long cr3)
{
	struct protection_domain *domain = dom->priv;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&domain->lock, flags);
	ret = __set_gcr3(domain, pasid, cr3);
	spin_unlock_irqrestore(&domain->lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);

int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid)
{
	struct protection_domain *domain = dom->priv;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&domain->lock, flags);
	ret = __clear_gcr3(domain, pasid);
	spin_unlock_irqrestore(&domain->lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3);
int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
			   int status, int tag)
{
	struct iommu_dev_data *dev_data;
	struct amd_iommu *iommu;
	struct iommu_cmd cmd;

	INC_STATS_COUNTER(complete_ppr);

	dev_data = get_dev_data(&pdev->dev);
	iommu    = amd_iommu_rlookup_table[dev_data->devid];

	build_complete_ppr(&cmd, dev_data->devid, pasid, status,
			   tag, dev_data->pri_tlp);

	return iommu_queue_command(iommu, &cmd);
}
EXPORT_SYMBOL(amd_iommu_complete_ppr);
struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
{
	struct protection_domain *domain;

	domain = get_domain(&pdev->dev);
	if (IS_ERR(domain))
		return NULL;

	/* Only return IOMMUv2 domains */
	if (!(domain->flags & PD_IOMMUV2_MASK))
		return NULL;

	return domain->iommu_domain;
}
EXPORT_SYMBOL(amd_iommu_get_v2_domain);
void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum)
{
	struct iommu_dev_data *dev_data;

	if (!amd_iommu_v2_supported())
		return;

	dev_data = get_dev_data(&pdev->dev);
	dev_data->errata |= (1 << erratum);
}
EXPORT_SYMBOL(amd_iommu_enable_device_erratum);
int amd_iommu_device_info(struct pci_dev *pdev,
			  struct amd_iommu_device_info *info)
{
	int max_pasids;
	int pos;

	if (pdev == NULL || info == NULL)
		return -EINVAL;

	if (!amd_iommu_v2_supported())
		return -EINVAL;

	memset(info, 0, sizeof(*info));

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS);
	if (pos)
		info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
	if (pos)
		info->flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
	if (pos) {
		int features;

		max_pasids = 1 << (9 * (amd_iommu_max_glx_val + 1));
		max_pasids = min(max_pasids, (1 << 20));

		info->flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
		info->max_pasids = min(pci_max_pasids(pdev), max_pasids);

		features = pci_pasid_features(pdev);
		if (features & PCI_PASID_CAP_EXEC)
			info->flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
		if (features & PCI_PASID_CAP_PRIV)
			info->flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
	}

	return 0;
}
EXPORT_SYMBOL(amd_iommu_device_info);
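
/*
 * Illustrative sketch (not part of the driver): a hypothetical caller that
 * wants to know whether a device can do PASID-tagged DMA before setting up
 * an IOMMUv2 domain for it:
 *
 *	struct amd_iommu_device_info info;
 *
 *	if (amd_iommu_device_info(pdev, &info))
 *		return -ENODEV;
 *	if (!(info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) ||
 *	    !(info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP)   ||
 *	    !(info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP))
 *		return -ENODEV;
 *	pr_info("device supports up to %d PASIDs\n", info.max_pasids);
 */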
#ifdef CONFIG_IRQ_REMAP

/*****************************************************************************
 *
 * Interrupt Remapping Implementation
 *
 *****************************************************************************/

#define DTE_IRQ_PHYS_ADDR_MASK	(((1ULL << 45)-1) << 6)
#define DTE_IRQ_REMAP_INTCTL	(2ULL << 60)
#define DTE_IRQ_TABLE_LEN	(8ULL << 1)
#define DTE_IRQ_REMAP_ENABLE	1ULL
static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
{
	u64 dte;

	dte	= amd_iommu_dev_table[devid].data[2];
	dte	&= ~DTE_IRQ_PHYS_ADDR_MASK;
	dte	|= virt_to_phys(table->table);
	dte	|= DTE_IRQ_REMAP_INTCTL;
	dte	|= DTE_IRQ_TABLE_LEN;
	dte	|= DTE_IRQ_REMAP_ENABLE;

	amd_iommu_dev_table[devid].data[2] = dte;
}

#define IRTE_ALLOCATED (~1U)
static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
{
	struct irq_remap_table *table = NULL;
	struct amd_iommu *iommu;
	unsigned long flags;
	u16 alias;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);

	iommu = amd_iommu_rlookup_table[devid];
	if (!iommu)
		goto out_unlock;

	table = irq_lookup_table[devid];
	if (table)
		goto out;

	alias = amd_iommu_alias_table[devid];
	table = irq_lookup_table[alias];
	if (table) {
		irq_lookup_table[devid] = table;
		set_dte_irq_entry(devid, table);
		iommu_flush_dte(iommu, devid);
		goto out;
	}

	/* Nothing there yet, allocate new irq remapping table */
	table = kzalloc(sizeof(*table), GFP_ATOMIC);
	if (!table)
		goto out;

	spin_lock_init(&table->lock);

	if (ioapic)
		/* Keep the first 32 indexes free for IOAPIC interrupts */
		table->min_index = 32;

	table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_ATOMIC);
	if (!table->table) {
		kfree(table);
		table = NULL;
		goto out;
	}

	memset(table->table, 0, MAX_IRQS_PER_TABLE * sizeof(u32));

	if (ioapic) {
		int i;

		for (i = 0; i < 32; ++i)
			table->table[i] = IRTE_ALLOCATED;
	}

	irq_lookup_table[devid] = table;
	set_dte_irq_entry(devid, table);
	iommu_flush_dte(iommu, devid);
	if (devid != alias) {
		irq_lookup_table[alias] = table;
		set_dte_irq_entry(alias, table);
		iommu_flush_dte(iommu, alias);
	}

out:
	iommu_completion_wait(iommu);

out_unlock:
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	return table;
}
static int alloc_irq_index(struct irq_cfg *cfg, u16 devid, int count)
{
	struct irq_remap_table *table;
	unsigned long flags;
	int index, c;

	table = get_irq_table(devid, false);
	if (!table)
		return -ENODEV;

	spin_lock_irqsave(&table->lock, flags);

	/* Scan table for free entries */
	for (c = 0, index = table->min_index;
	     index < MAX_IRQS_PER_TABLE;
	     ++index) {
		if (table->table[index] == 0)
			c += 1;
		else
			c = 0;

		if (c == count) {
			struct irq_2_iommu *irte_info;

			for (; c != 0; --c)
				table->table[index - c + 1] = IRTE_ALLOCATED;

			index -= count - 1;

			irte_info             = &cfg->irq_2_iommu;
			irte_info->sub_handle = devid;
			irte_info->irte_index = index;
			irte_info->iommu      = (void *)cfg;

			goto out;
		}
	}

	index = -ENOSPC;

out:
	spin_unlock_irqrestore(&table->lock, flags);

	return index;
}
static int get_irte(u16 devid, int index, union irte *irte)
{
	struct irq_remap_table *table;
	unsigned long flags;

	table = get_irq_table(devid, false);
	if (!table)
		return -ENOMEM;

	spin_lock_irqsave(&table->lock, flags);
	irte->val = table->table[index];
	spin_unlock_irqrestore(&table->lock, flags);

	return 0;
}
static int modify_irte(u16 devid, int index, union irte irte)
{
	struct irq_remap_table *table;
	struct amd_iommu *iommu;
	unsigned long flags;

	iommu = amd_iommu_rlookup_table[devid];
	if (iommu == NULL)
		return -EINVAL;

	table = get_irq_table(devid, false);
	if (!table)
		return -ENOMEM;

	spin_lock_irqsave(&table->lock, flags);
	table->table[index] = irte.val;
	spin_unlock_irqrestore(&table->lock, flags);

	iommu_flush_irt(iommu, devid);
	iommu_completion_wait(iommu);

	return 0;
}
static void free_irte(u16 devid, int index)
{
	struct irq_remap_table *table;
	struct amd_iommu *iommu;
	unsigned long flags;

	iommu = amd_iommu_rlookup_table[devid];
	if (iommu == NULL)
		return;

	table = get_irq_table(devid, false);
	if (!table)
		return;

	spin_lock_irqsave(&table->lock, flags);
	table->table[index] = 0;
	spin_unlock_irqrestore(&table->lock, flags);

	iommu_flush_irt(iommu, devid);
	iommu_completion_wait(iommu);
}
static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
			      unsigned int destination, int vector,
			      struct io_apic_irq_attr *attr)
{
	struct irq_remap_table *table;
	struct irq_2_iommu *irte_info;
	struct irq_cfg *cfg;
	union irte irte;
	int ioapic_id;
	int index;
	int devid;
	int ret;

	cfg = irq_get_chip_data(irq);
	if (!cfg)
		return -EINVAL;

	irte_info = &cfg->irq_2_iommu;
	ioapic_id = mpc_ioapic_id(attr->ioapic);
	devid     = get_ioapic_devid(ioapic_id);

	if (devid < 0)
		return devid;

	table = get_irq_table(devid, true);
	if (!table)
		return -ENOMEM;

	index = attr->ioapic_pin;

	/* Setup IRQ remapping info */
	irte_info->sub_handle = devid;
	irte_info->irte_index = index;
	irte_info->iommu      = (void *)cfg;

	/* Setup IRTE for IOMMU */
	irte.val		= 0;
	irte.fields.vector      = vector;
	irte.fields.int_type    = apic->irq_delivery_mode;
	irte.fields.destination = destination;
	irte.fields.dm          = apic->irq_dest_mode;
	irte.fields.valid       = 1;

	ret = modify_irte(devid, index, irte);
	if (ret)
		return ret;

	/* Setup IOAPIC entry */
	memset(entry, 0, sizeof(*entry));

	entry->vector        = index;
	entry->mask          = 0;
	entry->trigger       = attr->trigger;
	entry->polarity      = attr->polarity;

	/*
	 * Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (attr->trigger)
		entry->mask = 1;

	return 0;
}
static int set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_2_iommu *irte_info;
	unsigned int dest, irq;
	struct irq_cfg *cfg;
	union irte irte;
	int err;

	if (!config_enabled(CONFIG_SMP))
		return -1;

	cfg       = data->chip_data;
	irq       = data->irq;
	irte_info = &cfg->irq_2_iommu;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return -EINVAL;

	if (get_irte(irte_info->sub_handle, irte_info->irte_index, &irte))
		return -EBUSY;

	if (assign_irq_vector(irq, cfg, mask))
		return -EBUSY;

	err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
	if (err) {
		if (assign_irq_vector(irq, cfg, data->affinity))
			pr_err("AMD-Vi: Failed to recover vector for irq %d\n", irq);
		return err;
	}

	irte.fields.vector      = cfg->vector;
	irte.fields.destination = dest;

	modify_irte(irte_info->sub_handle, irte_info->irte_index, irte);

	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	cpumask_copy(data->affinity, mask);

	return 0;
}
static int free_irq(int irq)
{
	struct irq_2_iommu *irte_info;
	struct irq_cfg *cfg;

	cfg = irq_get_chip_data(irq);
	if (!cfg)
		return -EINVAL;

	irte_info = &cfg->irq_2_iommu;

	free_irte(irte_info->sub_handle, irte_info->irte_index);

	return 0;
}
static void compose_msi_msg(struct pci_dev *pdev,
			    unsigned int irq, unsigned int dest,
			    struct msi_msg *msg, u8 hpet_id)
{
	struct irq_2_iommu *irte_info;
	struct irq_cfg *cfg;
	union irte irte;

	cfg = irq_get_chip_data(irq);
	if (!cfg)
		return;

	irte_info = &cfg->irq_2_iommu;

	irte.val		= 0;
	irte.fields.vector	= cfg->vector;
	irte.fields.int_type    = apic->irq_delivery_mode;
	irte.fields.destination	= dest;
	irte.fields.dm		= apic->irq_dest_mode;
	irte.fields.valid	= 1;

	modify_irte(irte_info->sub_handle, irte_info->irte_index, irte);

	msg->address_hi = MSI_ADDR_BASE_HI;
	msg->address_lo = MSI_ADDR_BASE_LO;
	msg->data       = irte_info->irte_index;
}
static int msi_alloc_irq(struct pci_dev *pdev, int irq, int nvec)
{
	struct irq_cfg *cfg;
	int index;
	u16 devid;

	if (!pdev)
		return -EINVAL;

	cfg = irq_get_chip_data(irq);
	if (!cfg)
		return -EINVAL;

	devid = get_device_id(&pdev->dev);
	index = alloc_irq_index(cfg, devid, nvec);

	return index < 0 ? MAX_IRQS_PER_TABLE : index;
}
static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
			 int index, int offset)
{
	struct irq_2_iommu *irte_info;
	struct irq_cfg *cfg;
	u16 devid;

	if (!pdev)
		return -EINVAL;

	cfg = irq_get_chip_data(irq);
	if (!cfg)
		return -EINVAL;

	if (index >= MAX_IRQS_PER_TABLE)
		return 0;

	devid		= get_device_id(&pdev->dev);
	irte_info	= &cfg->irq_2_iommu;

	irte_info->sub_handle = devid;
	irte_info->irte_index = index + offset;
	irte_info->iommu      = (void *)cfg;