git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/commitdiff
bnxt_en: Enhance bnxt_alloc_ring()/bnxt_free_ring().
author: Michael Chan <michael.chan@broadcom.com>
Thu, 20 Dec 2018 08:38:49 +0000 (03:38 -0500)
committer: David S. Miller <davem@davemloft.net>
Thu, 20 Dec 2018 16:26:16 +0000 (08:26 -0800)
To support level 2 context page memory structures, enhance the
bnxt_ring_mem_info structure with a "depth" field to specify the page
level and add a flag to specify using full pages for L1 and L2 page
tables.  This is needed to support RDMA functionality on 57500 chips
since RDMA requires more context memory.

Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h

index 79280c8f026b3515ed1941fb6be45b37d9ddf11b..a46e2ff3a77384ca86752368e7c19fc905c30354 100644 (file)
@@ -2375,7 +2375,11 @@ static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
                rmem->pg_arr[i] = NULL;
        }
        if (rmem->pg_tbl) {
-               dma_free_coherent(&pdev->dev, rmem->nr_pages * 8,
+               size_t pg_tbl_size = rmem->nr_pages * 8;
+
+               if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
+                       pg_tbl_size = rmem->page_size;
+               dma_free_coherent(&pdev->dev, pg_tbl_size,
                                  rmem->pg_tbl, rmem->pg_tbl_map);
                rmem->pg_tbl = NULL;
        }
@@ -2393,9 +2397,12 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
 
        if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
                valid_bit = PTU_PTE_VALID;
-       if (rmem->nr_pages > 1) {
-               rmem->pg_tbl = dma_alloc_coherent(&pdev->dev,
-                                                 rmem->nr_pages * 8,
+       if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
+               size_t pg_tbl_size = rmem->nr_pages * 8;
+
+               if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
+                       pg_tbl_size = rmem->page_size;
+               rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
                                                  &rmem->pg_tbl_map,
                                                  GFP_KERNEL);
                if (!rmem->pg_tbl)
@@ -2412,7 +2419,7 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
                if (!rmem->pg_arr[i])
                        return -ENOMEM;
 
-               if (rmem->nr_pages > 1) {
+               if (rmem->nr_pages > 1 || rmem->depth > 0) {
                        if (i == rmem->nr_pages - 2 &&
                            (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
                                extra_bits |= PTU_PTE_NEXT_TO_LAST;
index c3334c0d1e6208209bd3af55c85e8d32d83d3019..1a5d02beee37e1ad4311c4079948067442de56c9 100644 (file)
@@ -617,9 +617,12 @@ struct bnxt_sw_rx_agg_bd {
 struct bnxt_ring_mem_info {
        int                     nr_pages;
        int                     page_size;
-       u32                     flags;
+       u16                     flags;
 #define BNXT_RMEM_VALID_PTE_FLAG       1
 #define BNXT_RMEM_RING_PTE_FLAG                2
+#define BNXT_RMEM_USE_FULL_PAGE_FLAG   4
+
+       u16                     depth;
 
        void                    **pg_arr;
        dma_addr_t              *dma_arr;