VT-d: remove now unused intel_iommu_found function

diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index f0a21995b135e3d37e9e072d3866ce04d42b8a6c..ecb5fd3b71f797de762ad8db617c1a4ca707cfc8 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -34,6 +34,7 @@
 #include <linux/mempool.h>
 #include <linux/timer.h>
 #include <linux/iova.h>
+#include <linux/iommu.h>
 #include <linux/intel-iommu.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
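
The new <linux/iommu.h> include pulls in the hardware-independent IOMMU API that the rest of this patch implements. Judging from the callbacks wired up in intel_iommu_ops at the end of the diff, the interface looks roughly like the sketch below; this is reconstructed from the usage in this file, not copied from the header.

    struct iommu_domain {
            void *priv;     /* driver-private data; here a struct dmar_domain */
    };

    struct iommu_ops {
            int (*domain_init)(struct iommu_domain *domain);
            void (*domain_destroy)(struct iommu_domain *domain);
            int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
            void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
            int (*map)(struct iommu_domain *domain, unsigned long iova,
                       phys_addr_t paddr, size_t size, int prot);
            void (*unmap)(struct iommu_domain *domain, unsigned long iova,
                          size_t size);
            phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
                                        unsigned long iova);
    };

    void register_iommu(struct iommu_ops *ops);
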
@@ -230,6 +231,7 @@ struct dmar_domain {
        int             iommu_coherency;/* indicate coherency of iommu access */
        int             iommu_count;    /* reference count of iommu */
        spinlock_t      iommu_lock;     /* protect iommu set in domain */
+       u64             max_addr;       /* maximum mapped address */
 };
 
 /* PCI domain-device relationship */
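
The new max_addr field records the end of the highest IOVA range ever mapped into the domain. intel_iommu_map_range() below pushes it up on every mapping, intel_iommu_unmap_range() pulls it back when the topmost range is removed, and intel_iommu_attach_device() refuses an IOMMU whose address width cannot reach it. The page rounding involved is easiest to see with concrete numbers; the macro values in this sketch are assumed to mirror the driver's 4 KiB VT-d page size.

    #define VTD_PAGE_SHIFT          12
    #define VTD_PAGE_MASK           (((u64)-1) << VTD_PAGE_SHIFT)
    #define VTD_PAGE_ALIGN(addr)    (((addr) + (1ULL << VTD_PAGE_SHIFT) - 1) \
                                     & VTD_PAGE_MASK)

    /* Mapping iova 0x1000 with size 0x2345: the size rounds up to three
     * full pages (0x3000), so max_addr becomes 0x1000 + 0x3000 = 0x4000. */
    u64 max_addr = (0x1000ULL & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(0x2345ULL);
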
@@ -275,6 +277,8 @@ static int intel_iommu_strict;
 static DEFINE_SPINLOCK(device_domain_lock);
 static LIST_HEAD(device_domain_list);
 
+static struct iommu_ops intel_iommu_ops;
+
 static int __init intel_iommu_setup(char *str)
 {
        if (!str)
@@ -1216,6 +1220,7 @@ static int iommu_init_domains(struct intel_iommu *iommu)
 
 
 static void domain_exit(struct dmar_domain *domain);
+static void vm_domain_exit(struct dmar_domain *domain);
 
 void free_dmar_iommu(struct intel_iommu *iommu)
 {
@@ -1229,8 +1234,12 @@ void free_dmar_iommu(struct intel_iommu *iommu)
                clear_bit(i, iommu->domain_ids);
 
                spin_lock_irqsave(&domain->iommu_lock, flags);
-               if (--domain->iommu_count == 0)
-                       domain_exit(domain);
+               if (--domain->iommu_count == 0) {
+                       if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
+                               vm_domain_exit(domain);
+                       else
+                               domain_exit(domain);
+               }
                spin_unlock_irqrestore(&domain->iommu_lock, flags);
 
                i = find_next_bit(iommu->domain_ids,
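
A domain with DOMAIN_FLAG_VIRTUAL_MACHINE is shared across IOMMUs and was created through the generic API rather than from a single DRHD unit, so when its last IOMMU reference drops it must be torn down with vm_domain_exit() (introduced further down) instead of domain_exit(). The surrounding loop also shows the find_first_bit()/find_next_bit() walk this patch uses everywhere a per-IOMMU domain-id bitmap is scanned; as a standalone sketch:

    #include <linux/bitops.h>

    /* Sketch of the bitmap-walk idiom: visit every set bit, i.e. every
     * domain id currently in use on one IOMMU.  'domain_ids' and
     * 'ndomains' stand in for the driver's per-IOMMU fields. */
    static void visit_used_domain_ids(unsigned long *domain_ids,
                                      unsigned long ndomains)
    {
            unsigned long i = find_first_bit(domain_ids, ndomains);

            while (i < ndomains) {
                    /* iommu->domains[i] is a live domain at this point */
                    i = find_next_bit(domain_ids, ndomains, i + 1);
            }
    }
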
@@ -1449,6 +1458,11 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
        struct context_entry *context;
        unsigned long flags;
        struct intel_iommu *iommu;
+       struct dma_pte *pgd;
+       unsigned long num;
+       unsigned long ndomains;
+       int id;
+       int agaw;
 
        pr_debug("Set context mapping for %02x:%02x.%d\n",
                bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
@@ -1467,9 +1481,53 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
                return 0;
        }
 
-       context_set_domain_id(context, domain->id);
-       context_set_address_width(context, domain->agaw);
-       context_set_address_root(context, virt_to_phys(domain->pgd));
+       id = domain->id;
+       pgd = domain->pgd;
+
+       if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) {
+               int found = 0;
+
+               /* find an available domain id for this device in iommu */
+               ndomains = cap_ndoms(iommu->cap);
+               num = find_first_bit(iommu->domain_ids, ndomains);
+               for (; num < ndomains; ) {
+                       if (iommu->domains[num] == domain) {
+                               id = num;
+                               found = 1;
+                               break;
+                       }
+                       num = find_next_bit(iommu->domain_ids,
+                                           cap_ndoms(iommu->cap), num+1);
+               }
+
+               if (found == 0) {
+                       num = find_first_zero_bit(iommu->domain_ids, ndomains);
+                       if (num >= ndomains) {
+                               spin_unlock_irqrestore(&iommu->lock, flags);
+                               printk(KERN_ERR "IOMMU: no free domain ids\n");
+                               return -EFAULT;
+                       }
+
+                       set_bit(num, iommu->domain_ids);
+                       iommu->domains[num] = domain;
+                       id = num;
+               }
+
+               /* Skip top levels of page tables for
+                * iommu which has less agaw than default.
+                */
+               for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
+                       pgd = phys_to_virt(dma_pte_addr(pgd));
+                       if (!dma_pte_present(pgd)) {
+                               spin_unlock_irqrestore(&iommu->lock, flags);
+                               return -ENOMEM;
+                       }
+               }
+       }
+
+       context_set_domain_id(context, id);
+       context_set_address_width(context, iommu->agaw);
+       context_set_address_root(context, virt_to_phys(pgd));
        context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
        context_set_fault_enable(context);
        context_set_present(context);
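
For a virtual-machine domain, neither domain->id nor domain->agaw can go into the context entry directly: the software id allocated from vm_domid means nothing to the hardware, so a real id is looked up, or newly allocated, in this IOMMU's domain_ids bitmap, and the page-table root handed to the hardware must match the number of levels this particular IOMMU walks. The level-skipping loop is clearest with numbers; the sketch below assumes the driver's convention that agaw_to_width(agaw) == 30 + agaw * 9 and that each agaw step is one page-table level.

    /* A domain built with agaw = 2 owns a 4-level (48-bit) page table.
     * An IOMMU with agaw = 1 walks only 3 levels (39 bits), so its
     * context entry must point one level below the domain's top table. */
    struct dma_pte *pgd = domain->pgd;
    int agaw;

    for (agaw = 2 /* domain->agaw */; agaw != 1 /* iommu->agaw */; agaw--)
            pgd = phys_to_virt(dma_pte_addr(pgd));  /* descend one level */

    /* One iteration: pgd now points at the 3-level subtree reachable
     * through entry 0, which covers exactly the low 39 bits.  This is
     * also why attach and map reject an IOMMU too narrow for max_addr. */
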
@@ -2673,6 +2731,9 @@ int __init intel_iommu_init(void)
        init_timer(&unmap_timer);
        force_iommu = 1;
        dma_ops = &intel_dma_ops;
+
+       register_iommu(&intel_iommu_ops);
+
        return 0;
 }
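
register_iommu() publishes the intel_iommu_ops table defined at the bottom of this diff, making VT-d reachable through the generic calls instead of the intel_iommu_* exports removed below. A hedged sketch of what a consumer such as KVM device assignment then looks like (frontend names per the generic API this series targets; error handling trimmed):

    #include <linux/iommu.h>

    static int assign_device_example(struct device *dev, unsigned long iova,
                                     phys_addr_t paddr, size_t size)
    {
            struct iommu_domain *domain;
            int ret;

            if (!iommu_found())        /* true once register_iommu() ran */
                    return -ENODEV;

            domain = iommu_domain_alloc(); /* -> intel_iommu_domain_init() */
            if (!domain)
                    return -ENOMEM;

            ret = iommu_attach_device(domain, dev);         /* ->attach_dev */
            if (ret)
                    goto out_free;

            ret = iommu_map_range(domain, iova, paddr, size,
                                  IOMMU_READ | IOMMU_WRITE); /* ->map */
            if (ret)
                    goto out_detach;
            return 0;

    out_detach:
            iommu_detach_device(domain, dev);               /* ->detach_dev */
    out_free:
            iommu_domain_free(domain); /* -> intel_iommu_domain_destroy() */
            return ret;
    }
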
 
@@ -2792,7 +2853,98 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
        spin_unlock_irqrestore(&device_domain_lock, flags1);
 }
 
-void intel_iommu_domain_exit(struct dmar_domain *domain)
+/* domain id for virtual machine, it won't be set in context */
+static unsigned long vm_domid;
+
+static int vm_domain_min_agaw(struct dmar_domain *domain)
+{
+       int i;
+       int min_agaw = domain->agaw;
+
+       i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
+       for (; i < g_num_of_iommus; ) {
+               if (min_agaw > g_iommus[i]->agaw)
+                       min_agaw = g_iommus[i]->agaw;
+
+               i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
+       }
+
+       return min_agaw;
+}
+
+static struct dmar_domain *iommu_alloc_vm_domain(void)
+{
+       struct dmar_domain *domain;
+
+       domain = alloc_domain_mem();
+       if (!domain)
+               return NULL;
+
+       domain->id = vm_domid++;
+       memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
+       domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
+
+       return domain;
+}
+
+static int vm_domain_init(struct dmar_domain *domain, int guest_width)
+{
+       int adjust_width;
+
+       init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
+       spin_lock_init(&domain->mapping_lock);
+       spin_lock_init(&domain->iommu_lock);
+
+       domain_reserve_special_ranges(domain);
+
+       /* calculate AGAW */
+       domain->gaw = guest_width;
+       adjust_width = guestwidth_to_adjustwidth(guest_width);
+       domain->agaw = width_to_agaw(adjust_width);
+
+       INIT_LIST_HEAD(&domain->devices);
+
+       domain->iommu_count = 0;
+       domain->iommu_coherency = 0;
+       domain->max_addr = 0;
+
+       /* always allocate the top pgd */
+       domain->pgd = (struct dma_pte *)alloc_pgtable_page();
+       if (!domain->pgd)
+               return -ENOMEM;
+       domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
+       return 0;
+}
+
+static void iommu_free_vm_domain(struct dmar_domain *domain)
+{
+       unsigned long flags;
+       struct dmar_drhd_unit *drhd;
+       struct intel_iommu *iommu;
+       unsigned long i;
+       unsigned long ndomains;
+
+       for_each_drhd_unit(drhd) {
+               if (drhd->ignored)
+                       continue;
+               iommu = drhd->iommu;
+
+               ndomains = cap_ndoms(iommu->cap);
+               i = find_first_bit(iommu->domain_ids, ndomains);
+               for (; i < ndomains; ) {
+                       if (iommu->domains[i] == domain) {
+                               spin_lock_irqsave(&iommu->lock, flags);
+                               clear_bit(i, iommu->domain_ids);
+                               iommu->domains[i] = NULL;
+                               spin_unlock_irqrestore(&iommu->lock, flags);
+                               break;
+                       }
+                       i = find_next_bit(iommu->domain_ids, ndomains, i+1);
+               }
+       }
+}
+
+static void vm_domain_exit(struct dmar_domain *domain)
 {
        u64 end;
 
@@ -2800,6 +2952,9 @@ void intel_iommu_domain_exit(struct dmar_domain *domain)
        if (!domain)
                return;
 
+       vm_domain_remove_all_dev_info(domain);
+       /* destroy iovas */
+       put_iova_domain(&domain->iovad);
        end = DOMAIN_MAX_ADDR(domain->gaw);
        end = end & (~VTD_PAGE_MASK);
 
@@ -2809,97 +2964,167 @@ void intel_iommu_domain_exit(struct dmar_domain *domain)
        /* free page tables */
        dma_pte_free_pagetable(domain, 0, end);
 
-       iommu_free_domain(domain);
+       iommu_free_vm_domain(domain);
        free_domain_mem(domain);
 }
-EXPORT_SYMBOL_GPL(intel_iommu_domain_exit);
 
-struct dmar_domain *intel_iommu_domain_alloc(struct pci_dev *pdev)
+static int intel_iommu_domain_init(struct iommu_domain *domain)
 {
-       struct dmar_drhd_unit *drhd;
-       struct dmar_domain *domain;
-       struct intel_iommu *iommu;
-
-       drhd = dmar_find_matched_drhd_unit(pdev);
-       if (!drhd) {
-               printk(KERN_ERR "intel_iommu_domain_alloc: drhd == NULL\n");
-               return NULL;
-       }
+       struct dmar_domain *dmar_domain;
 
-       iommu = drhd->iommu;
-       if (!iommu) {
-               printk(KERN_ERR
-                       "intel_iommu_domain_alloc: iommu == NULL\n");
-               return NULL;
-       }
-       domain = iommu_alloc_domain(iommu);
-       if (!domain) {
+       dmar_domain = iommu_alloc_vm_domain();
+       if (!dmar_domain) {
                printk(KERN_ERR
-                       "intel_iommu_domain_alloc: domain == NULL\n");
-               return NULL;
+                       "intel_iommu_domain_init: dmar_domain == NULL\n");
+               return -ENOMEM;
        }
-       if (domain_init(domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+       if (vm_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
                printk(KERN_ERR
-                       "intel_iommu_domain_alloc: domain_init() failed\n");
-               intel_iommu_domain_exit(domain);
-               return NULL;
+                       "intel_iommu_domain_init() failed\n");
+               vm_domain_exit(dmar_domain);
+               return -ENOMEM;
        }
-       return domain;
+       domain->priv = dmar_domain;
+
+       return 0;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_domain_alloc);
 
-int intel_iommu_context_mapping(
-       struct dmar_domain *domain, struct pci_dev *pdev)
+static void intel_iommu_domain_destroy(struct iommu_domain *domain)
 {
-       int rc;
-       rc = domain_context_mapping(domain, pdev);
-       return rc;
+       struct dmar_domain *dmar_domain = domain->priv;
+
+       domain->priv = NULL;
+       vm_domain_exit(dmar_domain);
 }
-EXPORT_SYMBOL_GPL(intel_iommu_context_mapping);
 
-int intel_iommu_page_mapping(
-       struct dmar_domain *domain, dma_addr_t iova,
-       u64 hpa, size_t size, int prot)
+static int intel_iommu_attach_device(struct iommu_domain *domain,
+                                    struct device *dev)
 {
-       int rc;
-       rc = domain_page_mapping(domain, iova, hpa, size, prot);
-       return rc;
+       struct dmar_domain *dmar_domain = domain->priv;
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct intel_iommu *iommu;
+       int addr_width;
+       u64 end;
+       int ret;
+
+       /* normally pdev is not mapped */
+       if (unlikely(domain_context_mapped(pdev))) {
+               struct dmar_domain *old_domain;
+
+               old_domain = find_domain(pdev);
+               if (old_domain) {
+                       if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
+                               vm_domain_remove_one_dev_info(old_domain, pdev);
+                       else
+                               domain_remove_dev_info(old_domain);
+               }
+       }
+
+       iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
+       if (!iommu)
+               return -ENODEV;
+
+       /* check if this iommu agaw is sufficient for max mapped address */
+       addr_width = agaw_to_width(iommu->agaw);
+       end = DOMAIN_MAX_ADDR(addr_width);
+       end = end & VTD_PAGE_MASK;
+       if (end < dmar_domain->max_addr) {
+               printk(KERN_ERR "%s: iommu agaw (%d) is not "
+                      "sufficient for the mapped address (%llx)\n",
+                      __func__, iommu->agaw, dmar_domain->max_addr);
+               return -EFAULT;
+       }
+
+       ret = domain_context_mapping(dmar_domain, pdev);
+       if (ret)
+               return ret;
+
+       ret = vm_domain_add_dev_info(dmar_domain, pdev);
+       return ret;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_page_mapping);
 
-void intel_iommu_detach_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
+static void intel_iommu_detach_device(struct iommu_domain *domain,
+                                     struct device *dev)
 {
-       struct intel_iommu *iommu;
+       struct dmar_domain *dmar_domain = domain->priv;
+       struct pci_dev *pdev = to_pci_dev(dev);
 
-       iommu = device_to_iommu(bus, devfn);
-       iommu_detach_dev(iommu, bus, devfn);
+       vm_domain_remove_one_dev_info(dmar_domain, pdev);
 }
-EXPORT_SYMBOL_GPL(intel_iommu_detach_dev);
 
-struct dmar_domain *
-intel_iommu_find_domain(struct pci_dev *pdev)
+static int intel_iommu_map_range(struct iommu_domain *domain,
+                                unsigned long iova, phys_addr_t hpa,
+                                size_t size, int iommu_prot)
 {
-       return find_domain(pdev);
+       struct dmar_domain *dmar_domain = domain->priv;
+       u64 max_addr;
+       int addr_width;
+       int prot = 0;
+       int ret;
+
+       if (iommu_prot & IOMMU_READ)
+               prot |= DMA_PTE_READ;
+       if (iommu_prot & IOMMU_WRITE)
+               prot |= DMA_PTE_WRITE;
+
+       max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
+       if (dmar_domain->max_addr < max_addr) {
+               int min_agaw;
+               u64 end;
+
+               /* check if minimum agaw is sufficient for mapped address */
+               min_agaw = vm_domain_min_agaw(dmar_domain);
+               addr_width = agaw_to_width(min_agaw);
+               end = DOMAIN_MAX_ADDR(addr_width);
+               end = end & VTD_PAGE_MASK;
+               if (end < max_addr) {
+                       printk(KERN_ERR "%s: iommu agaw (%d) is not "
+                              "sufficient for the mapped address (%llx)\n",
+                              __func__, min_agaw, max_addr);
+                       return -EFAULT;
+               }
+               dmar_domain->max_addr = max_addr;
+       }
+
+       ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot);
+       return ret;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_find_domain);
 
-int intel_iommu_found(void)
+static void intel_iommu_unmap_range(struct iommu_domain *domain,
+                                   unsigned long iova, size_t size)
 {
-       return g_num_of_iommus;
+       struct dmar_domain *dmar_domain = domain->priv;
+       dma_addr_t base;
+
+       /* The address might not be aligned */
+       base = iova & VTD_PAGE_MASK;
+       size = VTD_PAGE_ALIGN(size);
+       dma_pte_clear_range(dmar_domain, base, base + size);
+
+       if (dmar_domain->max_addr == base + size)
+               dmar_domain->max_addr = base;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_found);
 
-u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova)
+static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
+                                           unsigned long iova)
 {
+       struct dmar_domain *dmar_domain = domain->priv;
        struct dma_pte *pte;
-       u64 pfn;
-
-       pfn = 0;
-       pte = addr_to_dma_pte(domain, iova);
+       u64 phys = 0;
 
+       pte = addr_to_dma_pte(dmar_domain, iova);
        if (pte)
-               pfn = dma_pte_addr(pte);
+               phys = dma_pte_addr(pte);
 
-       return pfn >> VTD_PAGE_SHIFT;
+       return phys;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn);
+
+static struct iommu_ops intel_iommu_ops = {
+       .domain_init    = intel_iommu_domain_init,
+       .domain_destroy = intel_iommu_domain_destroy,
+       .attach_dev     = intel_iommu_attach_device,
+       .detach_dev     = intel_iommu_detach_device,
+       .map            = intel_iommu_map_range,
+       .unmap          = intel_iommu_unmap_range,
+       .iova_to_phys   = intel_iommu_iova_to_phys,
+};
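
Each entry in this table is the generic-API face of one of the static functions above, with domain->priv carrying the struct dmar_domain. Note that iova_to_phys now returns a full physical address, where the deleted intel_iommu_iova_to_pfn() returned a page frame number (pfn >> VTD_PAGE_SHIFT). With every former user of the intel_iommu_* exports converted to this table, intel_iommu_found() is left without callers, which is the removal the commit subject refers to; consumers query the generic iommu_found() instead.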