RDMA/bnxt_re: Use core helpers to get aligned DMA address
author Shiraz Saleem <shiraz.saleem@intel.com>
Mon, 6 May 2019 13:53:35 +0000 (08:53 -0500)
committer Jason Gunthorpe <jgg@mellanox.com>
Mon, 6 May 2019 16:08:11 +0000 (13:08 -0300)
Call the core helpers to retrieve the HW aligned address to use for the
MR, within a supported bnxt_re page size.

Remove checking the umem->hugetlb flag as it is no longer required. The
new DMA block iterator will return the 2M aligned address if the MR is
backed by 2M huge pages.
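
(For orientation only, not part of the commit: a minimal sketch of the core
iteration pattern the patch switches to, assuming a umem and a pbl_tbl as in
fill_umem_pbl_tbl() below.)

	struct ib_block_iter biter;
	u64 *pbl = pbl_tbl;

	/* Walk the umem scatterlist in blocks of 1 << page_shift bytes;
	 * each iteration yields one HW-aligned DMA address for the PBL.
	 */
	rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap,
			    BIT_ULL(page_shift))
		*pbl++ = rdma_block_iter_dma_address(&biter);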

Acked-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
drivers/infiniband/hw/bnxt_re/ib_verbs.c

index cde789cb691b76f8e836c205c7ccd7155a61e2d6..2c3685faa57a42defe87428d5cdf3f01f4136b2f 100644
@@ -3507,17 +3507,12 @@ static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
                             int page_shift)
 {
        u64 *pbl_tbl = pbl_tbl_orig;
-       u64 paddr;
-       u64 page_mask = (1ULL << page_shift) - 1;
-       struct sg_dma_page_iter sg_iter;
+       u64 page_size =  BIT_ULL(page_shift);
+       struct ib_block_iter biter;
+
+       rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, page_size)
+               *pbl_tbl++ = rdma_block_iter_dma_address(&biter);
 
-       for_each_sg_dma_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
-               paddr = sg_page_iter_dma_address(&sg_iter);
-               if (pbl_tbl == pbl_tbl_orig)
-                       *pbl_tbl++ = paddr & ~page_mask;
-               else if ((paddr & page_mask) == 0)
-                       *pbl_tbl++ = paddr;
-       }
        return pbl_tbl - pbl_tbl_orig;
 }
 
@@ -3579,7 +3574,9 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
                goto free_umem;
        }
 
-       page_shift = PAGE_SHIFT;
+       page_shift = __ffs(ib_umem_find_best_pgsz(umem,
+                               BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M,
+                               virt_addr));
 
        if (!bnxt_re_page_size_ok(page_shift)) {
                dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
@@ -3587,17 +3584,13 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
                goto fail;
        }
 
-       if (!umem->hugetlb && length > BNXT_RE_MAX_MR_SIZE_LOW) {
+       if (page_shift == BNXT_RE_PAGE_SHIFT_4K &&
+           length > BNXT_RE_MAX_MR_SIZE_LOW) {
                dev_err(rdev_to_dev(rdev), "Requested MR Sz:%llu Max sup:%llu",
                        length, (u64)BNXT_RE_MAX_MR_SIZE_LOW);
                rc = -EINVAL;
                goto fail;
        }
-       if (umem->hugetlb && length > BNXT_RE_PAGE_SIZE_2M) {
-               page_shift = BNXT_RE_PAGE_SHIFT_2M;
-               dev_warn(rdev_to_dev(rdev), "umem hugetlb set page_size %x",
-                        1 << page_shift);
-       }
 
        /* Map umem buf ptrs to the PBL */
        umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, page_shift);
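
(Usage note, a sketch restating the page-size selection in the hunk above
rather than additional driver code; SZ_4K | SZ_2M stands in for the driver's
supported page-size bitmap.)

	unsigned long pg_sz;

	/* ib_umem_find_best_pgsz() returns the largest page size from the
	 * supplied bitmap that the umem's physical layout and the MR's
	 * virtual start address can support; __ffs() turns that power-of-two
	 * size into a shift, e.g. 21 when the MR is backed by 2M huge pages,
	 * which is why the umem->hugetlb check is no longer needed.
	 */
	pg_sz = ib_umem_find_best_pgsz(umem, SZ_4K | SZ_2M, virt_addr);
	page_shift = __ffs(pg_sz);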