git.proxmox.com Git - mirror_ubuntu-kernels.git/commitdiff
drm/panfrost: Update io-pgtable API
author Robin Murphy <robin.murphy@arm.com>
Mon, 22 Aug 2022 22:01:27 +0000 (23:01 +0100)
committer Steven Price <steven.price@arm.com>
Thu, 1 Sep 2022 10:18:57 +0000 (11:18 +0100)
Convert to io-pgtable's bulk {map,unmap}_pages() APIs, to help the old
single-page interfaces eventually go away. Unmapping heap BOs still
wants to be done a page at a time, but everything else can get the full
benefit of the more efficient interface.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Acked-by: Alyssa Rosenzweig <alyssa@collabora.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Signed-off-by: Steven Price <steven.price@arm.com>
Link: https://patchwork.freedesktop.org/patch/msgid/daef7f8c134d989c55636a5790d8c0fcaca1bae3.1661205687.git.robin.murphy@arm.com
drivers/gpu/drm/panfrost/panfrost_mmu.c

index d3f82b26a631dbbb45e7ab15abb9c4acb85d1def..963d8e1997d5003fb0bf20be0dbde255b7370389 100644 (file)
@@ -248,11 +248,15 @@ void panfrost_mmu_reset(struct panfrost_device *pfdev)
        mmu_write(pfdev, MMU_INT_MASK, ~0);
 }
 
-static size_t get_pgsize(u64 addr, size_t size)
+static size_t get_pgsize(u64 addr, size_t size, size_t *count)
 {
-       if (addr & (SZ_2M - 1) || size < SZ_2M)
-               return SZ_4K;
+       size_t blk_offset = -addr % SZ_2M;
 
+       if (blk_offset || size < SZ_2M) {
+               *count = min_not_zero(blk_offset, size) / SZ_4K;
+               return SZ_4K;
+       }
+       *count = size / SZ_2M;
        return SZ_2M;
 }
 
@@ -287,12 +291,16 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
                dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);
 
                while (len) {
-                       size_t pgsize = get_pgsize(iova | paddr, len);
-
-                       ops->map(ops, iova, paddr, pgsize, prot, GFP_KERNEL);
-                       iova += pgsize;
-                       paddr += pgsize;
-                       len -= pgsize;
+                       size_t pgcount, mapped = 0;
+                       size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);
+
+                       ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
+                                      GFP_KERNEL, &mapped);
+                       /* Don't get stuck if things have gone wrong */
+                       mapped = max(mapped, pgsize);
+                       iova += mapped;
+                       paddr += mapped;
+                       len -= mapped;
                }
        }
 
@@ -344,15 +352,17 @@ void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
                mapping->mmu->as, iova, len);
 
        while (unmapped_len < len) {
-               size_t unmapped_page;
-               size_t pgsize = get_pgsize(iova, len - unmapped_len);
-
-               if (ops->iova_to_phys(ops, iova)) {
-                       unmapped_page = ops->unmap(ops, iova, pgsize, NULL);
-                       WARN_ON(unmapped_page != pgsize);
+               size_t unmapped_page, pgcount;
+               size_t pgsize = get_pgsize(iova, len - unmapped_len, &pgcount);
+
+               if (bo->is_heap)
+                       pgcount = 1;
+               if (!bo->is_heap || ops->iova_to_phys(ops, iova)) {
+                       unmapped_page = ops->unmap_pages(ops, iova, pgsize, pgcount, NULL);
+                       WARN_ON(unmapped_page != pgsize * pgcount);
                }
-               iova += pgsize;
-               unmapped_len += pgsize;
+               iova += pgsize * pgcount;
+               unmapped_len += pgsize * pgcount;
        }
 
        panfrost_mmu_flush_range(pfdev, mapping->mmu,