iommu: Hook up '->unmap_pages' driver callback
author Will Deacon <will@kernel.org>
Wed, 16 Jun 2021 13:38:48 +0000 (06:38 -0700)
committer Joerg Roedel <jroedel@suse.de>
Mon, 26 Jul 2021 10:37:07 +0000 (12:37 +0200)
Extend iommu_pgsize() to populate an optional 'count' parameter so that
we can direct the unmapping operation to the ->unmap_pages callback if
it has been provided by the driver.

Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Isaac J. Manjarres <isaacm@codeaurora.org>
Signed-off-by: Georgi Djakov <quic_c_gdjako@quicinc.com>
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Link: https://lore.kernel.org/r/1623850736-389584-8-git-send-email-quic_c_gdjako@quicinc.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
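For orientation, a minimal sketch of the driver side this patch caters for.
The names below (my_unmap, my_unmap_pages, my_iommu_ops) are hypothetical and
not part of this patch; only the ->unmap_pages member and its signature come
from the tree this commit targets, where both callbacks live in struct
iommu_ops.

/* Hypothetical driver: a stub for each callback. After this patch the
 * core prefers ->unmap_pages whenever the driver provides it and only
 * falls back to the single-mapping ->unmap otherwise. */
static size_t my_unmap(struct iommu_domain *domain, unsigned long iova,
                       size_t pgsize, struct iommu_iotlb_gather *gather)
{
        return pgsize;            /* stub: report one mapping removed */
}

static size_t my_unmap_pages(struct iommu_domain *domain, unsigned long iova,
                             size_t pgsize, size_t pgcount,
                             struct iommu_iotlb_gather *gather)
{
        return pgsize * pgcount;  /* stub: report the whole run removed */
}

static const struct iommu_ops my_iommu_ops = {
        .unmap          = my_unmap,
        .unmap_pages    = my_unmap_pages,
};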
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 80e14c139d40f51f7ced8d0ba07cc0cd7de17447..725622c7e60345c229a351ce06b20699506d97ce 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -2376,11 +2376,11 @@ phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
 EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
 
 static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
-                          phys_addr_t paddr, size_t size)
+                          phys_addr_t paddr, size_t size, size_t *count)
 {
-       unsigned int pgsize_idx;
+       unsigned int pgsize_idx, pgsize_idx_next;
        unsigned long pgsizes;
-       size_t pgsize;
+       size_t offset, pgsize, pgsize_next;
        unsigned long addr_merge = paddr | iova;
 
        /* Page sizes supported by the hardware and small enough for @size */
@@ -2396,7 +2396,36 @@ static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
        /* Pick the biggest page size remaining */
        pgsize_idx = __fls(pgsizes);
        pgsize = BIT(pgsize_idx);
+       if (!count)
+               return pgsize;
 
+       /* Find the next biggest supported page size, if it exists */
+       pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
+       if (!pgsizes)
+               goto out_set_count;
+
+       pgsize_idx_next = __ffs(pgsizes);
+       pgsize_next = BIT(pgsize_idx_next);
+
+       /*
+        * There's no point trying a bigger page size unless the virtual
+        * and physical addresses are similarly offset within the larger page.
+        */
+       if ((iova ^ paddr) & (pgsize_next - 1))
+               goto out_set_count;
+
+       /* Calculate the offset to the next page size alignment boundary */
+       offset = pgsize_next - (addr_merge & (pgsize_next - 1));
+
+       /*
+        * If size is big enough to accommodate the larger page, reduce
+        * the number of smaller pages.
+        */
+       if (offset + pgsize_next <= size)
+               size = offset;
+
+out_set_count:
+       *count = size >> pgsize_idx;
        return pgsize;
 }
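To see what the new 'count' computation yields, here is a hedged,
self-contained userspace replica of the hunk above; the 4KiB|2MiB
pgsize_bitmap and the addresses in main() are illustrative assumptions,
not part of this patch.

#include <stdio.h>
#include <assert.h>

#define BIT(n)          (1UL << (n))
#define GENMASK(h, l)   ((~0UL << (l)) & (~0UL >> (8 * sizeof(long) - 1 - (h))))

/* Userspace stand-ins for the kernel's __fls()/__ffs() bit helpers */
static unsigned int __fls(unsigned long x) { return 8 * sizeof(long) - 1 - __builtin_clzl(x); }
static unsigned int __ffs(unsigned long x) { return __builtin_ctzl(x); }

/* Replica of iommu_pgsize() after this patch, with the domain reduced
 * to its pgsize_bitmap. */
static size_t iommu_pgsize(unsigned long pgsize_bitmap, unsigned long iova,
                           unsigned long paddr, size_t size, size_t *count)
{
        unsigned int pgsize_idx, pgsize_idx_next;
        unsigned long pgsizes;
        size_t offset, pgsize, pgsize_next;
        unsigned long addr_merge = paddr | iova;

        /* Page sizes supported by the hardware and small enough for @size */
        pgsizes = pgsize_bitmap & GENMASK(__fls(size), 0);

        /* Constrain the page sizes further based on the maximum alignment */
        if (addr_merge)
                pgsizes &= GENMASK(__ffs(addr_merge), 0);

        assert(pgsizes);        /* BUG_ON(!pgsizes) in the kernel */

        /* Pick the biggest page size remaining */
        pgsize_idx = __fls(pgsizes);
        pgsize = BIT(pgsize_idx);
        if (!count)
                return pgsize;

        /* Find the next biggest supported page size, if it exists */
        pgsizes = pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
        if (!pgsizes)
                goto out_set_count;

        pgsize_idx_next = __ffs(pgsizes);
        pgsize_next = BIT(pgsize_idx_next);

        /* A bigger page only helps if iova and paddr are offset alike */
        if ((iova ^ paddr) & (pgsize_next - 1))
                goto out_set_count;

        /* Offset to the next page size alignment boundary */
        offset = pgsize_next - (addr_merge & (pgsize_next - 1));

        /* Big enough for the larger page? Then stop at its boundary. */
        if (offset + pgsize_next <= size)
                size = offset;

out_set_count:
        *count = size >> pgsize_idx;
        return pgsize;
}

int main(void)
{
        unsigned long bitmap = BIT(12) | BIT(21);   /* 4KiB and 2MiB pages */
        size_t count;
        size_t pgsize = iommu_pgsize(bitmap, 0x1fc000, 0x1fc000,
                                     8UL << 20, &count);

        /* Prints "pgsize=0x1000 count=4": four 4KiB pages carry us to the
         * 2MiB boundary at 0x200000, after which 2MiB pages can be used. */
        printf("pgsize=0x%zx count=%zu\n", pgsize, count);
        return 0;
}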
 
@@ -2434,7 +2463,7 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
        pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
 
        while (size) {
-               size_t pgsize = iommu_pgsize(domain, iova, paddr, size);
+               size_t pgsize = iommu_pgsize(domain, iova, paddr, size, NULL);
 
                pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
                         iova, &paddr, pgsize);
@@ -2485,6 +2514,19 @@ int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
 }
 EXPORT_SYMBOL_GPL(iommu_map_atomic);
 
+static size_t __iommu_unmap_pages(struct iommu_domain *domain,
+                                 unsigned long iova, size_t size,
+                                 struct iommu_iotlb_gather *iotlb_gather)
+{
+       const struct iommu_ops *ops = domain->ops;
+       size_t pgsize, count;
+
+       pgsize = iommu_pgsize(domain, iova, iova, size, &count);
+       return ops->unmap_pages ?
+              ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather) :
+              ops->unmap(domain, iova, pgsize, iotlb_gather);
+}
+
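Two details of the helper above are worth spelling out. First, since an unmap
has no physical address, 'iova' is passed for both the iova and paddr
arguments, so the (iova ^ paddr) check in iommu_pgsize() trivially passes and
the alignment constraint reduces to the IOVA alone. Second, the effect on a
driver that implements ->unmap_pages is to batch what used to be one ->unmap
call per page-size chunk. A hypothetical walk, using the same illustrative
4KiB|2MiB bitmap as the sketch above:

/*
 * Unmapping 8MiB at IOVA 0x1fc000 now reaches the driver as
 *
 *   unmap_pages(iova=0x1fc000, pgsize=SZ_4K, count=4);    up to the 2M boundary
 *   unmap_pages(iova=0x200000, pgsize=SZ_2M, count=3);    6MiB of huge pages
 *   unmap_pages(iova=0x800000, pgsize=SZ_4K, count=508);  2032KiB tail
 *
 * i.e. 3 driver calls where the old ->unmap loop needed 4 + 3 + 508 = 515.
 */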
 static size_t __iommu_unmap(struct iommu_domain *domain,
                            unsigned long iova, size_t size,
                            struct iommu_iotlb_gather *iotlb_gather)
@@ -2494,7 +2536,7 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
        unsigned long orig_iova = iova;
        unsigned int min_pagesz;
 
-       if (unlikely(ops->unmap == NULL ||
+       if (unlikely(!(ops->unmap || ops->unmap_pages) ||
                     domain->pgsize_bitmap == 0UL))
                return 0;
 
@@ -2522,10 +2564,9 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
         * or we hit an area that isn't mapped.
         */
        while (unmapped < size) {
-               size_t pgsize;
-
-               pgsize = iommu_pgsize(domain, iova, iova, size - unmapped);
-               unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather);
+               unmapped_page = __iommu_unmap_pages(domain, iova,
+                                                   size - unmapped,
+                                                   iotlb_gather);
                if (!unmapped_page)
                        break;
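Nothing changes for callers: the usual entry point is still iommu_unmap(),
which wraps __iommu_unmap() in an iotlb gather-and-sync. A hedged usage
sketch, assuming domain/iova/size come from the caller:

/* Caller-side sketch: unchanged by this patch. iommu_unmap() initialises
 * the iotlb_gather, runs the __iommu_unmap() loop above (which now batches
 * through ->unmap_pages when available) and then syncs the IOTLB. */
size_t unmapped = iommu_unmap(domain, iova, size);

if (unmapped != size)
        pr_warn("partial unmap: %zx of %zx bytes\n", unmapped, size);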