]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blobdiff - drivers/iommu/intel-iommu.c
Merge branches 'iommu/fixes', 'arm/exynos', 'arm/renesas', 'arm/smmu', 'arm/mediatek...
[mirror_ubuntu-bionic-kernel.git] / drivers / iommu / intel-iommu.c
index cbe7c49b756597a7f0be9e534ae4c5b6c55dce01..f5e02f8e737113123991607219ad23a12b2d1c54 100644 (file)
@@ -440,6 +440,7 @@ struct dmar_rmrr_unit {
        u64     end_address;            /* reserved end address */
        struct dmar_dev_scope *devices; /* target devices */
        int     devices_cnt;            /* target device count */
+       struct iommu_resv_region *resv; /* reserved region handle */
 };
 
 struct dmar_atsr_unit {
@@ -1144,7 +1145,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level,
                if (!dma_pte_present(pte) || dma_pte_superpage(pte))
                        goto next;
 
-               level_pfn = pfn & level_mask(level - 1);
+               level_pfn = pfn & level_mask(level);
                level_pte = phys_to_virt(dma_pte_addr(pte));
 
                if (level > 2)
@@ -2037,6 +2038,25 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
        if (context_present(context))
                goto out_unlock;
 
+       /*
+        * For kdump cases, old valid entries may be cached due to the
+        * in-flight DMA and copied pgtable, but there is no unmapping
+        * behaviour for them, thus we need an explicit cache flush for
+        * the newly-mapped device. For kdump, at this point, the device
+        * is supposed to finish reset at its driver probe stage, so no
+        * in-flight DMA will exist, and we don't need to worry anymore
+        * hereafter.
+        */
+       if (context_copied(context)) {
+               u16 did_old = context_domain_id(context);
+
+               if (did_old >= 0 && did_old < cap_ndoms(iommu->cap))
+                       iommu->flush.flush_context(iommu, did_old,
+                                                  (((u16)bus) << 8) | devfn,
+                                                  DMA_CCMD_MASK_NOBIT,
+                                                  DMA_CCMD_DEVICE_INVL);
+       }
+
        pgd = domain->pgd;
 
        context_clear_entry(context);
@@ -3306,13 +3326,14 @@ static int __init init_dmars(void)
        iommu_identity_mapping |= IDENTMAP_GFX;
 #endif
 
+       check_tylersburg_isoch();
+
        if (iommu_identity_mapping) {
                ret = si_domain_init(hw_pass_through);
                if (ret)
                        goto free_iommu;
        }
 
-       check_tylersburg_isoch();
 
        /*
         * If we copied translations from a previous kernel in the kdump
@@ -4227,27 +4248,40 @@ static inline void init_iommu_pm_ops(void) {}
/*
 * dmar_parse_one_rmrr - record one Reserved Memory Region Reporting (RMRR)
 * structure from the ACPI DMAR table.
 *
 * Allocates a dmar_rmrr_unit describing the reserved range, a matching
 * direct-mapped reserved-region object, and the device scope array, then
 * links the unit into the global dmar_rmrr_units list.
 *
 * Returns 0 on success, or -ENOMEM with all partial allocations unwound.
 */
int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_reserved_memory *rmrr =
		(struct acpi_dmar_reserved_memory *)header;
	struct dmar_rmrr_unit *rmrru;
	size_t length;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	/* Pre-build the reserved-region handle for this RMRR range. */
	length = rmrr->end_address - rmrr->base_address + 1;
	rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length,
					      DMA_PTE_READ | DMA_PTE_WRITE,
					      IOMMU_RESV_DIRECT);
	if (!rmrru->resv) {
		kfree(rmrru);
		return -ENOMEM;
	}

	/* Device scope entries follow the fixed-size RMRR header. */
	rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
					      ((void *)rmrr) + rmrr->header.length,
					      &rmrru->devices_cnt);
	if (rmrru->devices_cnt && rmrru->devices == NULL) {
		kfree(rmrru->resv);
		kfree(rmrru);
		return -ENOMEM;
	}

	list_add(&rmrru->list, &dmar_rmrr_units);

	return 0;
}
 
 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
@@ -4461,6 +4495,7 @@ static void intel_iommu_free_dmars(void)
        list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
                list_del(&rmrru->list);
                dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
+               kfree(rmrru->resv);
                kfree(rmrru);
        }
 
@@ -5187,7 +5222,65 @@ static void intel_iommu_remove_device(struct device *dev)
        iommu_device_unlink(&iommu->iommu, dev);
 }
 
+static void intel_iommu_get_resv_regions(struct device *device,
+                                        struct list_head *head)
+{
+       struct iommu_resv_region *reg;
+       struct dmar_rmrr_unit *rmrr;
+       struct device *i_dev;
+       int i;
+
+       rcu_read_lock();
+       for_each_rmrr_units(rmrr) {
+               for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
+                                         i, i_dev) {
+                       if (i_dev != device)
+                               continue;
+
+                       list_add_tail(&rmrr->resv->list, head);
+               }
+       }
+       rcu_read_unlock();
+
+       reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
+                                     IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
+                                     0, IOMMU_RESV_RESERVED);
+       if (!reg)
+               return;
+       list_add_tail(&reg->list, head);
+}
+
+static void intel_iommu_put_resv_regions(struct device *dev,
+                                        struct list_head *head)
+{
+       struct iommu_resv_region *entry, *next;
+
+       list_for_each_entry_safe(entry, next, head, list) {
+               if (entry->type == IOMMU_RESV_RESERVED)
+                       kfree(entry);
+       }
+}
+
 #ifdef CONFIG_INTEL_IOMMU_SVM
/* Upper bound on PASID width considered below (VT-d PASIDs are <= 20 bits). */
#define MAX_NR_PASID_BITS (20)
/*
 * intel_iommu_get_pts - compute the PASID Table Size (pts) field value for
 * an extended context entry, honouring the soft limit iommu->pasid_max in
 * addition to the hardware's ecap_pss capability.
 */
static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
{
	/*
	 * Convert ecap_pss to extend context entry pts encoding, also
	 * respect the soft pasid_max value set by the iommu.
	 * - number of PASID bits = ecap_pss + 1
	 * - number of PASID table entries = 2^(pts + 5)
	 * Therefore, pts = ecap_pss - 4
	 * e.g. KBL ecap_pss = 0x13, PASID has 20 bits, pts = 15
	 */
	if (ecap_pss(iommu->ecap) < 5)
		return 0;

	/* pasid_max is encoded as actual number of entries not the bits */
	/*
	 * NOTE(review): find_first_bit() returns the LOWEST set bit, so this
	 * assumes pasid_max is an exact power of two — confirm wherever
	 * iommu->pasid_max is assigned.  If that bit index were ever < 5 the
	 * unsigned subtraction would wrap to a huge value.  Also confirm the
	 * cast of &pasid_max to (unsigned long *) is safe: find_first_bit()
	 * reads whole long words, which may over-read a narrower field.
	 */
	return find_first_bit((unsigned long *)&iommu->pasid_max,
			MAX_NR_PASID_BITS) - 5;
}
+
 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
 {
        struct device_domain_info *info;
@@ -5220,7 +5313,9 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
 
        if (!(ctx_lo & CONTEXT_PASIDE)) {
                context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);
-               context[1].lo = (u64)virt_to_phys(iommu->pasid_table) | ecap_pss(iommu->ecap);
+               context[1].lo = (u64)virt_to_phys(iommu->pasid_table) |
+                       intel_iommu_get_pts(iommu);
+
                wmb();
                /* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both
                 * extended to permit requests-with-PASID if the PASIDE bit
@@ -5296,19 +5391,21 @@ struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
 #endif /* CONFIG_INTEL_IOMMU_SVM */
 
 const struct iommu_ops intel_iommu_ops = {
-       .capable        = intel_iommu_capable,
-       .domain_alloc   = intel_iommu_domain_alloc,
-       .domain_free    = intel_iommu_domain_free,
-       .attach_dev     = intel_iommu_attach_device,
-       .detach_dev     = intel_iommu_detach_device,
-       .map            = intel_iommu_map,
-       .unmap          = intel_iommu_unmap,
-       .map_sg         = default_iommu_map_sg,
-       .iova_to_phys   = intel_iommu_iova_to_phys,
-       .add_device     = intel_iommu_add_device,
-       .remove_device  = intel_iommu_remove_device,
-       .device_group   = pci_device_group,
-       .pgsize_bitmap  = INTEL_IOMMU_PGSIZES,
+       .capable                = intel_iommu_capable,
+       .domain_alloc           = intel_iommu_domain_alloc,
+       .domain_free            = intel_iommu_domain_free,
+       .attach_dev             = intel_iommu_attach_device,
+       .detach_dev             = intel_iommu_detach_device,
+       .map                    = intel_iommu_map,
+       .unmap                  = intel_iommu_unmap,
+       .map_sg                 = default_iommu_map_sg,
+       .iova_to_phys           = intel_iommu_iova_to_phys,
+       .add_device             = intel_iommu_add_device,
+       .remove_device          = intel_iommu_remove_device,
+       .get_resv_regions       = intel_iommu_get_resv_regions,
+       .put_resv_regions       = intel_iommu_put_resv_regions,
+       .device_group           = pci_device_group,
+       .pgsize_bitmap          = INTEL_IOMMU_PGSIZES,
 };
 
 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)