static LIST_HEAD(iommu_pd_list);
static DEFINE_SPINLOCK(iommu_pd_list_lock);
-#ifdef CONFIG_IOMMU_API
+/*
+ * Domain for untranslated devices - only allocated
+ * if iommu=pt passed on kernel cmd line.
+ */
+static struct protection_domain *pt_domain;
+
static struct iommu_ops amd_iommu_ops;
-#endif
/*
 * general struct to manage commands sent to an IOMMU
static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
unsigned long start_page,
unsigned int pages);
+static void reset_iommu_command_buffer(struct amd_iommu *iommu);
+static u64 *fetch_pte(struct protection_domain *domain,
+		      unsigned long address, int map_size);
+static void update_domain(struct protection_domain *domain);
-#ifndef BUS_NOTIFY_UNBOUND_DRIVER
-#define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
-#endif
-
#ifdef CONFIG_AMD_IOMMU_STATS
/*
}
}
+static void reset_iommu_command_buffer(struct amd_iommu *iommu)
+{
+ pr_err("AMD-Vi: Resetting IOMMU command buffer\n");
+
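+	/*
+	 * The flushes below send commands themselves. If one of them
+	 * triggers another ILLEGAL_COMMAND_ERROR we would re-enter this
+	 * function, so give up in that case.
+	 */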
+	if (iommu->reset_in_progress)
+		panic("AMD-Vi: ILLEGAL_COMMAND_ERROR while resetting command buffer\n");
+
+ iommu->reset_in_progress = true;
+
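+	/*
+	 * The reset discards everything queued in the command buffer,
+	 * so replay the invalidations: flush all device table entries
+	 * and all domain TLBs on this IOMMU.
+	 */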
+ amd_iommu_reset_cmd_buffer(iommu);
+ flush_all_devices_for_iommu(iommu);
+ flush_all_domains_on_iommu(iommu);
+
+ iommu->reset_in_progress = false;
+}
+
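+/* a NULL domain tells flush_devices_by_domain() to flush every device */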
+void amd_iommu_flush_all_devices(void)
+{
+	flush_devices_by_domain(NULL);
+}
+
/****************************************************************************
*
 * The functions below are used to create the page table mappings for
return dom;
}
-/*
- * If a device is not yet associated with a domain, this function does
- * assigns it visible for the hardware
- */
-static void __attach_device(struct amd_iommu *iommu,
-			    struct protection_domain *domain,
-			    u16 devid)
+static void set_dte_entry(u16 devid, struct protection_domain *domain)
{
-	unsigned long flags;
-	u64 pte_root;
-
-	/* lock domain */
-	spin_lock(&domain->lock);
-
-	pte_root = virt_to_phys(domain->pt_root);
+	u64 pte_root = virt_to_phys(domain->pt_root);
pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
<< DEV_ENTRY_MODE_SHIFT;
pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
- write_lock_irqsave(&amd_iommu_devtable_lock, flags);
- amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
- amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
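+	/*
+	 * data[0] holds the present bit, so it is written last and the
+	 * IOMMU never sees a partially initialized entry.
+	 */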
amd_iommu_dev_table[devid].data[2] = domain->id;
+ amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
+ amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
amd_iommu_pd_table[devid] = domain;
- write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+}
+
+/*
+ * If a device is not yet associated with a domain, this function
+ * associates it with the domain and makes it visible to the hardware.
+ */
+static void __attach_device(struct amd_iommu *iommu,
+			    struct protection_domain *domain,
+			    u16 devid)
+{
+	/* lock domain */
+	spin_lock(&domain->lock);
+
+	/* update DTE entry */
+	set_dte_entry(devid, domain);
+
+	domain->dev_cnt += 1;
+
+	/* ready */
+	spin_unlock(&domain->lock);
}
+/*
+ * If a device is not yet associated with a domain, this function
+ * associates it with the domain and makes it visible to the hardware.
+ */
static void attach_device(struct amd_iommu *iommu,
struct protection_domain *domain,
u16 devid)
return 1;
}
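+
+/*
+ * Rewrite the DTE of every device attached to this domain so that the
+ * hardware sees the current page-table root and mode.
+ */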
+static void update_device_table(struct protection_domain *domain)
+{
+	unsigned long flags;
+	int i;
+
+	for (i = 0; i <= amd_iommu_last_bdf; ++i) {
+		if (amd_iommu_pd_table[i] != domain)
+			continue;
+		write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+		set_dte_entry(i, domain);
+		write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+	}
+}
+
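+/*
+ * Called whenever the page-table layout changed, e.g. after
+ * increase_address_space() grew the address space: rewrite all DTEs
+ * and flush the stale translations.
+ */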
+static void update_domain(struct protection_domain *domain)
+{
+	if (!domain->updated)
+		return;
+
+	update_device_table(domain);
+	flush_devices_by_domain(domain);
+	iommu_flush_domain(domain->id);
+
+	domain->updated = false;
+}
+
/*
- * If the pte_page is not yet allocated this function is called
+ * This function is used to add another level to an IO page table. Adding
+ * another level increases the size of the address space by 9 bits to a size up
+ * to 64 bits.
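+ * Three levels, for example, cover 3 * 9 + 12 = 39 address bits; each
+ * further level adds 9 bits, and a six-level table covers the full
+ * 64-bit space.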
*/
-static u64* alloc_pte(struct protection_domain *dom,
-		      unsigned long address, u64 **pte_page, gfp_t gfp)
+static bool increase_address_space(struct protection_domain *domain,
+				   gfp_t gfp)
+{
+	u64 *pte;
+
+	if (domain->mode == PAGE_MODE_6_LEVEL)
+		/* address space already 64 bit large */
+		return false;
+
+	pte = (void *)get_zeroed_page(gfp);
+	if (!pte)
+		return false;
+
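+	/*
+	 * The old top level becomes entry 0 of the new root page, so
+	 * all existing mappings keep their addresses.
+	 */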
+	*pte = PM_LEVEL_PDE(domain->mode,
+			    virt_to_phys(domain->pt_root));
+	domain->pt_root = pte;
+	domain->mode += 1;
+	domain->updated = true;
+
+	return true;
+}
+
+static u64 *alloc_pte(struct protection_domain *domain,
+		      unsigned long address,
+		      int end_lvl,
+		      u64 **pte_page,
+		      gfp_t gfp)
{
u64 *pte, *page;
+	int level;
-	pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(address)];
+	while (address > PM_LEVEL_SIZE(domain->mode))
+		increase_address_space(domain, gfp);
-	if (!IOMMU_PTE_PRESENT(*pte)) {
-		page = (u64 *)get_zeroed_page(gfp);
-		if (!page)
-			return NULL;
-		*pte = IOMMU_L2_PDE(virt_to_phys(page));
-	}
+	level = domain->mode - 1;
+	pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
-	pte = IOMMU_PTE_PAGE(*pte);
-	pte = &pte[IOMMU_PTE_L1_INDEX(address)];
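+	/* walk the table top-down, allocating missing levels on the way */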
+	while (level > end_lvl) {
+		if (!IOMMU_PTE_PRESENT(*pte)) {
+			page = (u64 *)get_zeroed_page(gfp);
+			if (!page)
+				return NULL;
+			*pte = PM_LEVEL_PDE(level, virt_to_phys(page));
+		}
-	if (!IOMMU_PTE_PRESENT(*pte)) {
-		page = (u64 *)get_zeroed_page(gfp);
-		if (!page)
-			return NULL;
-		*pte = IOMMU_L1_PDE(virt_to_phys(page));
-	}
+		level -= 1;
-	pte = IOMMU_PTE_PAGE(*pte);
+		pte = IOMMU_PTE_PAGE(*pte);
-	if (pte_page)
-		*pte_page = pte;
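+		/* hand the caller the page table page containing the pte */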
+		if (pte_page && level == end_lvl)
+			*pte_page = pte;
-	pte = &pte[IOMMU_PTE_L0_INDEX(address)];
+		pte = &pte[PM_LEVEL_INDEX(level, address)];
+	}
return pte;
}