IB/mlx4: Prevent cross page boundary allocation
author     Chuck Lever <chuck.lever@oracle.com>
           Wed, 22 Jun 2016 14:27:32 +0000 (17:27 +0300)
committer  Doug Ledford <dledford@redhat.com>
           Thu, 23 Jun 2016 14:08:25 +0000 (10:08 -0400)
Prevent the HW page list allocation from crossing a page boundary by
backing it with a whole new page; ConnectX-3 HW requires the list to
satisfy this alignment.

Not doing so might cause an "RDMA read local protection" error.
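
For reference, a minimal sketch of the allocation pattern the patch
switches to (illustrative only; demo_alloc_page_list() and its
arguments are not from the patch). A page returned by
get_zeroed_page() is page-aligned by construction, so the HW page
list it holds can never straddle a page boundary:

    #include <linux/types.h>
    #include <linux/gfp.h>
    #include <linux/dma-mapping.h>

    /* Back the HW page list with a whole zeroed page rather than a
     * kzalloc'd buffer; the page's natural alignment guarantees the
     * list stays within one page, as ConnectX-3 requires.
     */
    static __be64 *demo_alloc_page_list(struct device *dev, size_t size,
                                        dma_addr_t *dma)
    {
            __be64 *pages = (__be64 *)get_zeroed_page(GFP_KERNEL);

            if (!pages)
                    return NULL;

            *dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, *dma)) {
                    free_page((unsigned long)pages);
                    return NULL;
            }
            return pages;
    }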

Fixes: 1b2cd0fc673c ('IB/mlx4: Support the new memory registration API')
Suggested-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx4/mr.c

diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 6c5ac5d8f32ffcfafbb4dd7a4b9202b4049b95b5..29acda249612dd444ee42c37494674cc4e482790 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -139,7 +139,7 @@ struct mlx4_ib_mr {
        u32                     max_pages;
        struct mlx4_mr          mmr;
        struct ib_umem         *umem;
-       void                    *pages_alloc;
+       size_t                  page_map_size;
 };
 
 struct mlx4_ib_mw {
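
The replaced pages_alloc pointer existed only so the over-sized
kzalloc() buffer could later be kfree'd; with a whole page there is
nothing extra to track, and the new page_map_size field instead
records the DMA-mapped length. The DMA API requires that
dma_unmap_single() (and the sync helpers) be given the same size that
was passed to dma_map_single(), so caching it once keeps the map,
sync, and unmap paths below in agreement. A minimal sketch of that
invariant (dev is shorthand for the device pointer, not a driver
field):

    /* the length recorded at map time ... */
    mr->page_map = dma_map_single(dev, mr->pages,
                                  mr->page_map_size, DMA_TO_DEVICE);

    /* ... must be reused verbatim at unmap time */
    dma_unmap_single(dev, mr->page_map,
                     mr->page_map_size, DMA_TO_DEVICE);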
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 631272172a0b9793c0074e6ab9c00a4b9f2e4665..5d73989d977135d9d560add27e074417ceaca258 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -277,20 +277,23 @@ mlx4_alloc_priv_pages(struct ib_device *device,
                      struct mlx4_ib_mr *mr,
                      int max_pages)
 {
-       int size = max_pages * sizeof(u64);
-       int add_size;
        int ret;
 
-       add_size = max_t(int, MLX4_MR_PAGES_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
+       /* Ensure that size is aligned to DMA cacheline
+        * requirements.
+        * max_pages is limited to MLX4_MAX_FAST_REG_PAGES
+        * so page_map_size will never cross PAGE_SIZE.
+        */
+       mr->page_map_size = roundup(max_pages * sizeof(u64),
+                                   MLX4_MR_PAGES_ALIGN);
 
-       mr->pages_alloc = kzalloc(size + add_size, GFP_KERNEL);
-       if (!mr->pages_alloc)
+       /* Prevent cross page boundary allocation. */
+       mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL);
+       if (!mr->pages)
                return -ENOMEM;
 
-       mr->pages = PTR_ALIGN(mr->pages_alloc, MLX4_MR_PAGES_ALIGN);
-
        mr->page_map = dma_map_single(device->dma_device, mr->pages,
-                                     size, DMA_TO_DEVICE);
+                                     mr->page_map_size, DMA_TO_DEVICE);
 
        if (dma_mapping_error(device->dma_device, mr->page_map)) {
                ret = -ENOMEM;
@@ -298,9 +301,9 @@ mlx4_alloc_priv_pages(struct ib_device *device,
        }
 
        return 0;
-err:
-       kfree(mr->pages_alloc);
 
+err:
+       free_page((unsigned long)mr->pages);
        return ret;
 }
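
The comment's claim that the rounded-up size never crosses PAGE_SIZE
is easy to verify. Assuming the driver's constants at the time
(MLX4_MAX_FAST_REG_PAGES == 511 and MLX4_MR_PAGES_ALIGN == 0x40;
check your tree), the worst case is roundup(511 * 8, 64) =
roundup(4088, 64) = 4096, exactly one 4 KiB page. A compile-time
check along these lines (the DEMO_ names are illustrative, and
DIV_ROUND_UP() * align is used as a pure-expression equivalent of
roundup() here):

    #include <linux/kernel.h>       /* DIV_ROUND_UP() */
    #include <linux/bug.h>          /* BUILD_BUG_ON() */
    #include <linux/mm.h>           /* PAGE_SIZE */
    #include <linux/types.h>        /* u64 */

    #define DEMO_MAX_PAGES  511     /* MLX4_MAX_FAST_REG_PAGES */
    #define DEMO_ALIGN      0x40    /* MLX4_MR_PAGES_ALIGN */

    static inline void demo_page_map_size_fits(void)
    {
            /* roundup(511 * 8, 64) == roundup(4088, 64) == 4096 */
            BUILD_BUG_ON(DIV_ROUND_UP(DEMO_MAX_PAGES * sizeof(u64),
                                      DEMO_ALIGN) * DEMO_ALIGN > PAGE_SIZE);
    }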
 
@@ -309,11 +312,10 @@ mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
 {
        if (mr->pages) {
                struct ib_device *device = mr->ibmr.device;
-               int size = mr->max_pages * sizeof(u64);
 
                dma_unmap_single(device->dma_device, mr->page_map,
-                                size, DMA_TO_DEVICE);
-               kfree(mr->pages_alloc);
+                                mr->page_map_size, DMA_TO_DEVICE);
+               free_page((unsigned long)mr->pages);
                mr->pages = NULL;
        }
 }
@@ -537,14 +539,12 @@ int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
        mr->npages = 0;
 
        ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
-                                  sizeof(u64) * mr->max_pages,
-                                  DMA_TO_DEVICE);
+                                  mr->page_map_size, DMA_TO_DEVICE);
 
        rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page);
 
        ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
-                                     sizeof(u64) * mr->max_pages,
-                                     DMA_TO_DEVICE);
+                                     mr->page_map_size, DMA_TO_DEVICE);
 
        return rc;
 }
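
Here ib_dma_sync_single_for_cpu() hands the streaming DMA_TO_DEVICE
mapping back to the CPU so ib_sg_to_pages() can fill the page list
through the mlx4_set_page() callback, and
ib_dma_sync_single_for_device() then flushes it back for the HW; both
now cover mr->page_map_size bytes, matching what was mapped. For
orientation, a rough sketch of how an upper-layer protocol reaches
this function through the core helper as of this kernel (demo_map()
is illustrative, not a driver function):

    #include <linux/scatterlist.h>
    #include <rdma/ib_verbs.h>

    static int demo_map(struct ib_mr *mr, struct scatterlist *sg,
                        int sg_nents)
    {
            unsigned int sg_offset = 0;

            /* Dispatches to mlx4_ib_map_mr_sg() on mlx4 hardware;
             * the page list is synced over mr->page_map_size bytes.
             */
            return ib_map_mr_sg(mr, sg, sg_nents, &sg_offset, PAGE_SIZE);
    }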