diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 965c7235804d61aa4c493b472239a29bde66bcb7..787e1757973a2ef95410e4048fd58c24dd4bc6dc 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -409,7 +409,7 @@ static int bnx2_unregister_cnic(struct net_device *dev)
        mutex_lock(&bp->cnic_lock);
        cp->drv_state = 0;
        bnapi->cnic_present = 0;
-       rcu_assign_pointer(bp->cnic_ops, NULL);
+       RCU_INIT_POINTER(bp->cnic_ops, NULL);
        mutex_unlock(&bp->cnic_lock);
        synchronize_rcu();
        return 0;
@@ -2054,8 +2054,8 @@ __acquires(&bp->phy_lock)
 
        if (bp->autoneg & AUTONEG_SPEED) {
                u32 adv_reg, adv1000_reg;
-               u32 new_adv_reg = 0;
-               u32 new_adv1000_reg = 0;
+               u32 new_adv = 0;
+               u32 new_adv1000 = 0;
 
                bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
                adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
@@ -2064,27 +2064,18 @@ __acquires(&bp->phy_lock)
                bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
                adv1000_reg &= PHY_ALL_1000_SPEED;
 
-               if (bp->advertising & ADVERTISED_10baseT_Half)
-                       new_adv_reg |= ADVERTISE_10HALF;
-               if (bp->advertising & ADVERTISED_10baseT_Full)
-                       new_adv_reg |= ADVERTISE_10FULL;
-               if (bp->advertising & ADVERTISED_100baseT_Half)
-                       new_adv_reg |= ADVERTISE_100HALF;
-               if (bp->advertising & ADVERTISED_100baseT_Full)
-                       new_adv_reg |= ADVERTISE_100FULL;
-               if (bp->advertising & ADVERTISED_1000baseT_Full)
-                       new_adv1000_reg |= ADVERTISE_1000FULL;
+               new_adv = ethtool_adv_to_mii_adv_t(bp->advertising);
+               new_adv |= ADVERTISE_CSMA;
+               new_adv |= bnx2_phy_get_pause_adv(bp);
 
-               new_adv_reg |= ADVERTISE_CSMA;
+               new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
 
-               new_adv_reg |= bnx2_phy_get_pause_adv(bp);
-
-               if ((adv1000_reg != new_adv1000_reg) ||
-                       (adv_reg != new_adv_reg) ||
+               if ((adv1000_reg != new_adv1000) ||
+                       (adv_reg != new_adv) ||
                        ((bmcr & BMCR_ANENABLE) == 0)) {
 
-                       bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
-                       bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
+                       bnx2_write_phy(bp, bp->mii_adv, new_adv);
+                       bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
                        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
                                BMCR_ANENABLE);
                }
@@ -2734,31 +2725,27 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
 }
 
 static inline int
-bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
+bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
 {
-       struct sk_buff *skb;
+       u8 *data;
        struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
        dma_addr_t mapping;
        struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
-       unsigned long align;
 
-       skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
-       if (skb == NULL) {
+       data = kmalloc(bp->rx_buf_size, gfp);
+       if (!data)
                return -ENOMEM;
-       }
 
-       if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
-               skb_reserve(skb, BNX2_RX_ALIGN - align);
-
-       mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size,
+       mapping = dma_map_single(&bp->pdev->dev,
+                                get_l2_fhdr(data),
+                                bp->rx_buf_use_size,
                                 PCI_DMA_FROMDEVICE);
        if (dma_mapping_error(&bp->pdev->dev, mapping)) {
-               dev_kfree_skb(skb);
+               kfree(data);
                return -EIO;
        }
 
-       rx_buf->skb = skb;
-       rx_buf->desc = (struct l2_fhdr *) skb->data;
+       rx_buf->data = data;
        dma_unmap_addr_set(rx_buf, mapping, mapping);
 
        rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
@@ -2823,6 +2810,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
        u16 hw_cons, sw_cons, sw_ring_cons;
        int tx_pkt = 0, index;
+       unsigned int tx_bytes = 0;
        struct netdev_queue *txq;
 
        index = (bnapi - bp->bnx2_napi);
@@ -2877,6 +2865,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 
                sw_cons = NEXT_TX_BD(sw_cons);
 
+               tx_bytes += skb->len;
                dev_kfree_skb(skb);
                tx_pkt++;
                if (tx_pkt == budget)
@@ -2886,6 +2875,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
                        hw_cons = bnx2_get_hw_tx_cons(bnapi);
        }
 
+       netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
        txr->hw_tx_cons = hw_cons;
        txr->tx_cons = sw_cons;
 
@@ -2965,8 +2955,8 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
 }
 
 static inline void
-bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
-                 struct sk_buff *skb, u16 cons, u16 prod)
+bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
+                  u8 *data, u16 cons, u16 prod)
 {
        struct sw_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *cons_bd, *prod_bd;
@@ -2980,8 +2970,7 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
 
        rxr->rx_prod_bseq += bp->rx_buf_use_size;
 
-       prod_rx_buf->skb = skb;
-       prod_rx_buf->desc = (struct l2_fhdr *) skb->data;
+       prod_rx_buf->data = data;
 
        if (cons == prod)
                return;
@@ -2995,33 +2984,39 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
 }
 
-static int
-bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
+static struct sk_buff *
+bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
            unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
            u32 ring_idx)
 {
        int err;
        u16 prod = ring_idx & 0xffff;
+       struct sk_buff *skb;
 
-       err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC);
+       err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
-               bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
+               bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
+error:
                if (hdr_len) {
                        unsigned int raw_len = len + 4;
                        int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
 
                        bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
                }
-               return err;
+               return NULL;
        }
 
-       skb_reserve(skb, BNX2_RX_OFFSET);
        dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
                         PCI_DMA_FROMDEVICE);
-
+       skb = build_skb(data);
+       if (!skb) {
+               kfree(data);
+               goto error;
+       }
+       skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
        if (hdr_len == 0) {
                skb_put(skb, len);
-               return 0;
+               return skb;
        } else {
                unsigned int i, frag_len, frag_size, pages;
                struct sw_pg *rx_pg;
@@ -3052,7 +3047,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
                                        skb_frag_size_sub(frag, tail);
                                        skb->data_len -= tail;
                                }
-                               return 0;
+                               return skb;
                        }
                        rx_pg = &rxr->rx_pg_ring[pg_cons];
 
@@ -3074,7 +3069,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
                                rxr->rx_pg_prod = pg_prod;
                                bnx2_reuse_rx_skb_pages(bp, rxr, skb,
                                                        pages - i);
-                               return err;
+                               return NULL;
                        }
 
                        dma_unmap_page(&bp->pdev->dev, mapping_old,
@@ -3091,7 +3086,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
                rxr->rx_pg_prod = pg_prod;
                rxr->rx_pg_cons = pg_cons;
        }
-       return 0;
+       return skb;
 }
 
 static inline u16
@@ -3130,19 +3125,17 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
                struct sw_bd *rx_buf, *next_rx_buf;
                struct sk_buff *skb;
                dma_addr_t dma_addr;
+               u8 *data;
 
                sw_ring_cons = RX_RING_IDX(sw_cons);
                sw_ring_prod = RX_RING_IDX(sw_prod);
 
                rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
-               skb = rx_buf->skb;
-               prefetchw(skb);
-
-               next_rx_buf =
-                       &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
-               prefetch(next_rx_buf->desc);
+               data = rx_buf->data;
+               rx_buf->data = NULL;
 
-               rx_buf->skb = NULL;
+               rx_hdr = get_l2_fhdr(data);
+               prefetch(rx_hdr);
 
                dma_addr = dma_unmap_addr(rx_buf, mapping);
 
@@ -3150,7 +3143,10 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
                        BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
                        PCI_DMA_FROMDEVICE);
 
-               rx_hdr = rx_buf->desc;
+               next_rx_buf =
+                       &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
+               prefetch(get_l2_fhdr(next_rx_buf->data));
+
                len = rx_hdr->l2_fhdr_pkt_len;
                status = rx_hdr->l2_fhdr_status;
 
@@ -3169,7 +3165,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
                                       L2_FHDR_ERRORS_TOO_SHORT |
                                       L2_FHDR_ERRORS_GIANT_FRAME))) {
 
-                       bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
+                       bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
                                          sw_ring_prod);
                        if (pg_ring_used) {
                                int pages;
@@ -3184,30 +3180,29 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
                len -= 4;
 
                if (len <= bp->rx_copy_thresh) {
-                       struct sk_buff *new_skb;
-
-                       new_skb = netdev_alloc_skb(bp->dev, len + 6);
-                       if (new_skb == NULL) {
-                               bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
+                       skb = netdev_alloc_skb(bp->dev, len + 6);
+                       if (skb == NULL) {
+                               bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
                                                  sw_ring_prod);
                                goto next_rx;
                        }
 
                        /* aligned copy */
-                       skb_copy_from_linear_data_offset(skb,
-                                                        BNX2_RX_OFFSET - 6,
-                                     new_skb->data, len + 6);
-                       skb_reserve(new_skb, 6);
-                       skb_put(new_skb, len);
+                       memcpy(skb->data,
+                              (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
+                              len + 6);
+                       skb_reserve(skb, 6);
+                       skb_put(skb, len);
 
-                       bnx2_reuse_rx_skb(bp, rxr, skb,
+                       bnx2_reuse_rx_data(bp, rxr, data,
                                sw_ring_cons, sw_ring_prod);
 
-                       skb = new_skb;
-               } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
-                          dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
-                       goto next_rx;
-
+               } else {
+                       skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
+                                         (sw_ring_cons << 16) | sw_ring_prod);
+                       if (!skb)
+                               goto next_rx;
+               }
                if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
                    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
                        __vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);
@@ -5234,7 +5229,7 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
 
        ring_prod = prod = rxr->rx_prod;
        for (i = 0; i < bp->rx_ring_size; i++) {
-               if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
+               if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
                        netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
                                    ring_num, i, bp->rx_ring_size);
                        break;
@@ -5329,7 +5324,7 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
        rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
 
        rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
-               sizeof(struct skb_shared_info);
+               SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
        bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
        bp->rx_pg_ring_size = 0;
@@ -5351,8 +5346,9 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
        }
 
        bp->rx_buf_use_size = rx_size;
-       /* hw alignment */
-       bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
+       /* hw alignment + build_skb() overhead*/
+       bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
+               NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
        bp->rx_ring_size = size;
        bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
@@ -5400,6 +5396,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
                        }
                        dev_kfree_skb(skb);
                }
+               netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
        }
 }
 
@@ -5418,9 +5415,9 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
 
                for (j = 0; j < bp->rx_max_ring_idx; j++) {
                        struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
-                       struct sk_buff *skb = rx_buf->skb;
+                       u8 *data = rx_buf->data;
 
-                       if (skb == NULL)
+                       if (data == NULL)
                                continue;
 
                        dma_unmap_single(&bp->pdev->dev,
@@ -5428,9 +5425,9 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
                                         bp->rx_buf_use_size,
                                         PCI_DMA_FROMDEVICE);
 
-                       rx_buf->skb = NULL;
+                       rx_buf->data = NULL;
 
-                       dev_kfree_skb(skb);
+                       kfree(data);
                }
                for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
                        bnx2_free_rx_page(bp, rxr, j);
@@ -5736,7 +5733,8 @@ static int
 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 {
        unsigned int pkt_size, num_pkts, i;
-       struct sk_buff *skb, *rx_skb;
+       struct sk_buff *skb;
+       u8 *data;
        unsigned char *packet;
        u16 rx_start_idx, rx_idx;
        dma_addr_t map;
@@ -5828,14 +5826,14 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
        }
 
        rx_buf = &rxr->rx_buf_ring[rx_start_idx];
-       rx_skb = rx_buf->skb;
+       data = rx_buf->data;
 
-       rx_hdr = rx_buf->desc;
-       skb_reserve(rx_skb, BNX2_RX_OFFSET);
+       rx_hdr = get_l2_fhdr(data);
+       data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
 
        dma_sync_single_for_cpu(&bp->pdev->dev,
                dma_unmap_addr(rx_buf, mapping),
-               bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+               bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
 
        if (rx_hdr->l2_fhdr_status &
                (L2_FHDR_ERRORS_BAD_CRC |
@@ -5852,7 +5850,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
        }
 
        for (i = 14; i < pkt_size; i++) {
-               if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
+               if (*(data + i) != (unsigned char) (i & 0xff)) {
                        goto loopback_test_done;
                }
        }
@@ -6552,6 +6550,8 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
        txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
 
+       netdev_tx_sent_queue(txq, skb->len);
+
        prod = NEXT_TX_BD(prod);
        txr->tx_prod_bseq += skb->len;
 
@@ -6873,10 +6873,10 @@ bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
        struct bnx2 *bp = netdev_priv(dev);
 
-       strcpy(info->driver, DRV_MODULE_NAME);
-       strcpy(info->version, DRV_MODULE_VERSION);
-       strcpy(info->bus_info, pci_name(bp->pdev));
-       strcpy(info->fw_version, bp->fw_version);
+       strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+       strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
 }
 
 #define BNX2_REGDUMP_LEN               (32 * 1024)
@@ -7571,8 +7571,8 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
        return 0;
 }
 
-static u32
-bnx2_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t
+bnx2_fix_features(struct net_device *dev, netdev_features_t features)
 {
        struct bnx2 *bp = netdev_priv(dev);
 
@@ -7583,7 +7583,7 @@ bnx2_fix_features(struct net_device *dev, u32 features)
 }
 
 static int
-bnx2_set_features(struct net_device *dev, u32 features)
+bnx2_set_features(struct net_device *dev, netdev_features_t features)
 {
        struct bnx2 *bp = netdev_priv(dev);