net-next/hinic: replace multiply and division operators
author Xue Chaojing <xuechaojing@huawei.com>
Mon, 18 Mar 2019 08:29:26 +0000 (16:29 +0800)
committer Stefan Bader <stefan.bader@canonical.com>
Mon, 1 Apr 2019 12:37:29 +0000 (14:37 +0200)
BugLink: https://bugs.launchpad.net/bugs/1820187
To improve performance, this patch replaces the multiply and divide
operations in the work-queue index calculations with shift and mask
operations; the sizes involved are enforced to be powers of two.
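
For reference, the identity the patch relies on: when d is a power of two,
x / d == x >> ilog2(d), x % d == x & (d - 1), and x * d == x << ilog2(d).
The standalone sketch below (not part of the patch; my_ilog2 and the test
values are illustrative stand-ins, not driver code) checks these
equivalences:

#include <assert.h>
#include <stdio.h>

/* userspace stand-in for the kernel's ilog2(): floor(log2(n)) for n > 0 */
static unsigned int my_ilog2(unsigned int n)
{
	unsigned int shift = 0;

	while (n >>= 1)
		shift++;
	return shift;
}

int main(void)
{
	unsigned int d = 64;              /* divisor, must be a power of two */
	unsigned int shift = my_ilog2(d); /* 6 */
	unsigned int x;

	for (x = 0; x < 1024; x++) {
		assert(x / d == x >> shift);    /* division -> right shift */
		assert(x % d == (x & (d - 1))); /* modulo   -> mask        */
		assert(x * d == x << shift);    /* multiply -> left shift  */
	}
	printf("shift/mask forms match for d=%u (shift=%u)\n", d, shift);
	return 0;
}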

Signed-off-by: Xue Chaojing <xuechaojing@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit ebda9b46cebc9c1245fcfe96c76525717ef984cc)
Signed-off-by: Ike Panhc <ike.pan@canonical.com>
Acked-by: Kleber Souza <kleber.souza@canonical.com>
Acked-by: Stefan Bader <stefan.bader@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h

diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
index f92f1bf3901a7457b338ae387151c603dd28ff88..1dfa7eb05c102cce6667aeaad0a51265684ebd5c 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
                        ((void *)((cmdq_pages)->shadow_page_vaddr) \
                                + (wq)->block_idx * CMDQ_BLOCK_SIZE)
 
-#define WQE_PAGE_OFF(wq, idx)   (((idx) & ((wq)->num_wqebbs_per_page - 1)) * \
-                                       (wq)->wqebb_size)
-
-#define WQE_PAGE_NUM(wq, idx)   (((idx) / ((wq)->num_wqebbs_per_page)) \
-                                       & ((wq)->num_q_pages - 1))
-
 #define WQ_PAGE_ADDR(wq, idx)           \
                        ((wq)->shadow_block_vaddr[WQE_PAGE_NUM(wq, idx)])
 
                (((unsigned long)(wqe) - (unsigned long)(wq)->shadow_wqe) \
                        / (wq)->max_wqe_size)
 
+static inline int WQE_PAGE_OFF(struct hinic_wq *wq, u16 idx)
+{
+       return (((idx) & ((wq)->num_wqebbs_per_page - 1))
+               << (wq)->wqebb_size_shift);
+}
+
+static inline int WQE_PAGE_NUM(struct hinic_wq *wq, u16 idx)
+{
+       return (((idx) >> ((wq)->wqebbs_per_page_shift))
+               & ((wq)->num_q_pages - 1));
+}
+
 /**
  * queue_alloc_page - allocate page for Queue
  * @hwif: HW interface for allocating DMA
@@ -513,10 +518,11 @@ int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
        struct hinic_hwif *hwif = wqs->hwif;
        struct pci_dev *pdev = hwif->pdev;
        u16 num_wqebbs_per_page;
+       u16 wqebb_size_shift;
        int err;
 
-       if (wqebb_size == 0) {
-               dev_err(&pdev->dev, "wqebb_size must be > 0\n");
+       if (!is_power_of_2(wqebb_size)) {
+               dev_err(&pdev->dev, "wqebb_size must be power of 2\n");
                return -EINVAL;
        }
 
@@ -530,9 +536,11 @@ int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
                return -EINVAL;
        }
 
-       num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size;
+       wqebb_size_shift = ilog2(wqebb_size);
+       num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size)
+                               >> wqebb_size_shift;
 
-       if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) {
+       if (!is_power_of_2(num_wqebbs_per_page)) {
                dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
                return -EINVAL;
        }
@@ -550,7 +558,8 @@ int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
        wq->q_depth = q_depth;
        wq->max_wqe_size = max_wqe_size;
        wq->num_wqebbs_per_page = num_wqebbs_per_page;
-
+       wq->wqebbs_per_page_shift = ilog2(num_wqebbs_per_page);
+       wq->wqebb_size_shift = wqebb_size_shift;
        wq->block_vaddr = WQ_BASE_VADDR(wqs, wq);
        wq->shadow_block_vaddr = WQ_BASE_ADDR(wqs, wq);
        wq->block_paddr = WQ_BASE_PADDR(wqs, wq);
@@ -604,11 +613,13 @@ int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
                         u16 q_depth, u16 max_wqe_size)
 {
        struct pci_dev *pdev = hwif->pdev;
+       u16 num_wqebbs_per_page_shift;
        u16 num_wqebbs_per_page;
+       u16 wqebb_size_shift;
        int i, j, err = -ENOMEM;
 
-       if (wqebb_size == 0) {
-               dev_err(&pdev->dev, "wqebb_size must be > 0\n");
+       if (!is_power_of_2(wqebb_size)) {
+               dev_err(&pdev->dev, "wqebb_size must be power of 2\n");
                return -EINVAL;
        }
 
@@ -622,9 +633,11 @@ int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
                return -EINVAL;
        }
 
-       num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size;
+       wqebb_size_shift = ilog2(wqebb_size);
+       num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size)
+                               >> wqebb_size_shift;
 
-       if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) {
+       if (!is_power_of_2(num_wqebbs_per_page)) {
                dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
                return -EINVAL;
        }
@@ -636,6 +649,7 @@ int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
                dev_err(&pdev->dev, "Failed to allocate CMDQ page\n");
                return err;
        }
+       num_wqebbs_per_page_shift = ilog2(num_wqebbs_per_page);
 
        for (i = 0; i < cmdq_blocks; i++) {
                wq[i].hwif = hwif;
@@ -647,7 +661,8 @@ int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
                wq[i].q_depth = q_depth;
                wq[i].max_wqe_size = max_wqe_size;
                wq[i].num_wqebbs_per_page = num_wqebbs_per_page;
-
+               wq[i].wqebbs_per_page_shift = num_wqebbs_per_page_shift;
+               wq[i].wqebb_size_shift = wqebb_size_shift;
                wq[i].block_vaddr = CMDQ_BASE_VADDR(cmdq_pages, &wq[i]);
                wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]);
                wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]);
@@ -741,7 +756,7 @@ struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
 
        *prod_idx = MASKED_WQE_IDX(wq, atomic_read(&wq->prod_idx));
 
-       num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
+       num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) >> wq->wqebb_size_shift;
 
        if (atomic_sub_return(num_wqebbs, &wq->delta) <= 0) {
                atomic_add(num_wqebbs, &wq->delta);
@@ -795,7 +810,8 @@ void hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size)
  **/
 void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size)
 {
-       int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
+       int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size)
+                       >> wq->wqebb_size_shift;
 
        atomic_add(num_wqebbs, &wq->cons_idx);
 
@@ -813,7 +829,8 @@ void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size)
 struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
                                    u16 *cons_idx)
 {
-       int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
+       int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size)
+                       >> wq->wqebb_size_shift;
        u16 curr_cons_idx, end_cons_idx;
        int curr_pg, end_pg;
 
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
index 9b66545ba563c90a294600b7927a58c5983235ad..0a936cd6709b940f211cde56c0460b3efad6be4e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
@@ -39,7 +39,8 @@ struct hinic_wq {
        u16             q_depth;
        u16             max_wqe_size;
        u16             num_wqebbs_per_page;
-
+       u16             wqebbs_per_page_shift;
+       u16             wqebb_size_shift;
        /* The addresses are 64 bit in the HW */
        u64             block_paddr;
        void            **shadow_block_vaddr;
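
As a sanity check on the setup path, the standalone sketch below (not part
of the patch) mirrors how hinic_wq_allocate() now validates wqebb_size and
derives the shift fields added to struct hinic_wq above. ALIGN, ilog2 and
is_power_of_2 are reimplemented here for a userspace build (in the kernel
they come from <linux/kernel.h> and <linux/log2.h>), and the size values
are illustrative assumptions, not taken from the driver:

#include <stdint.h>
#include <stdio.h>

/* userspace stand-ins for the kernel helpers used by the patch */
#define ALIGN(x, a)      (((x) + (a) - 1) & ~((a) - 1)) /* a: power of two */
#define is_power_of_2(n) ((n) != 0 && (((n) & ((n) - 1)) == 0))

static unsigned int ilog2_u32(uint32_t n) /* floor(log2(n)) for n > 0 */
{
	unsigned int shift = 0;

	while (n >>= 1)
		shift++;
	return shift;
}

int main(void)
{
	uint16_t wqebb_size = 64;     /* illustrative, not from the driver */
	uint16_t wq_page_size = 4096; /* illustrative, not from the driver */
	uint16_t wqebb_size_shift, num_wqebbs_per_page;

	/* is_power_of_2(0) is false, so this check also covers the old
	 * "wqebb_size must be > 0" test that the patch removes */
	if (!is_power_of_2(wqebb_size))
		return 1;

	wqebb_size_shift = ilog2_u32(wqebb_size);
	num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) >> wqebb_size_shift;

	if (!is_power_of_2(num_wqebbs_per_page))
		return 1;

	printf("wqebb_size_shift=%u wqebbs_per_page_shift=%u num_wqebbs_per_page=%u\n",
	       wqebb_size_shift, ilog2_u32(num_wqebbs_per_page),
	       num_wqebbs_per_page);
	return 0;
}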