iommu/iova: Fix tracking of recently failed iova address
author	Robert Richter <rrichter@marvell.com>
Wed, 20 Mar 2019 18:57:23 +0000 (18:57 +0000)
committer	Joerg Roedel <jroedel@suse.de>
Fri, 22 Mar 2019 11:01:58 +0000 (12:01 +0100)
If a 32 bit allocation request is too big to possibly succeed, it
early exits with a failure and should then never update
max32_alloc_size. This patch fixes the current code so that the size
is only updated if the slow path fails while walking the tree.
Without the fix, the allocation may enter the slow path again even if
a request of the same or a smaller size has already failed before.

Cc: <stable@vger.kernel.org> # 4.20+
Fixes: bee60e94a1e2 ("iommu/iova: Optimise attempts to allocate iova from 32bit address range")
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Robert Richter <rrichter@marvell.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
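
The effect of moving the update can be illustrated with a small,
self-contained userspace model. This is not the kernel code:
try_alloc_32bit(), tree_walk_fails() and the 'fixed' flag are invented
here purely to show why updating the cached threshold on the early
exit lets a later, smaller request walk the rbtree again.

/*
 * Minimal userspace model of the max32_alloc_size cache, for
 * illustration only; names and values are made up.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned long max32_alloc_size = 64; /* smallest size known to have failed */
static int tree_walks;                      /* how often the slow path ran */

/* Stand-in for the rbtree walk: pretend any request of 64+ pfns fails. */
static bool tree_walk_fails(unsigned long size)
{
	tree_walks++;
	return size >= 64;
}

static int try_alloc_32bit(unsigned long size, bool fixed)
{
	/* Fast path: a request at least as large as a known failure is hopeless. */
	if (size >= max32_alloc_size) {
		if (!fixed)
			max32_alloc_size = size; /* bug: may *raise* the threshold */
		return -1;
	}

	if (tree_walk_fails(size)) {     /* slow path */
		max32_alloc_size = size; /* genuine failure while walking the tree */
		return -1;
	}
	return 0;
}

int main(void)
{
	/* Unfixed: the hopeless 128-pfn request raises the threshold, so a
	 * later 96-pfn request walks the tree again even though a smaller
	 * request (64 pfns) is already known to fail. */
	try_alloc_32bit(128, false);
	try_alloc_32bit(96, false);
	printf("unfixed: threshold=%lu, tree walks=%d\n", max32_alloc_size, tree_walks);

	/* Fixed: the early exit leaves the threshold alone and the 96-pfn
	 * request is rejected without touching the tree. */
	max32_alloc_size = 64;
	tree_walks = 0;
	try_alloc_32bit(128, true);
	try_alloc_32bit(96, true);
	printf("fixed:   threshold=%lu, tree walks=%d\n", max32_alloc_size, tree_walks);
	return 0;
}
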
drivers/iommu/iova.c

index f8d3ba2475237f4477994a7c8b8b1cae0cfe3310..2de8122e218fde5856867252679b0b95682f3619 100644 (file)
@@ -207,8 +207,10 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
                curr_iova = rb_entry(curr, struct iova, node);
        } while (curr && new_pfn <= curr_iova->pfn_hi);
 
-       if (limit_pfn < size || new_pfn < iovad->start_pfn)
+       if (limit_pfn < size || new_pfn < iovad->start_pfn) {
+               iovad->max32_alloc_size = size;
                goto iova32_full;
+       }
 
        /* pfn_lo will point to size aligned address if size_aligned is set */
        new->pfn_lo = new_pfn;
@@ -222,7 +224,6 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
        return 0;
 
 iova32_full:
-       iovad->max32_alloc_size = size;
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        return -ENOMEM;
 }